Diffstat:
-rw-r--r--  collectors/charts.d.plugin/apcupsd/README.md (renamed from python.d/python_modules/bases/FrameworkServices/__init__.py) | 0
-rw-r--r--  collectors/charts.d.plugin/opensips/README.md (renamed from python.d/python_modules/bases/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/Makefile.am (renamed from python.d/Makefile.am) | 150
-rw-r--r--  collectors/python.d.plugin/apache/apache.chart.py (renamed from python.d/apache.chart.py) | 57
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.chart.py (renamed from python.d/beanstalk.chart.py) | 43
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py (renamed from python.d/bind_rndc.chart.py) | 77
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.chart.py (renamed from python.d/ceph.chart.py) | 86
-rw-r--r--  collectors/python.d.plugin/chrony/chrony.chart.py (renamed from python.d/chrony.chart.py) | 69
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.chart.py (renamed from python.d/couchdb.chart.py) | 3
-rw-r--r--  collectors/python.d.plugin/cpufreq/cpufreq.chart.py (renamed from python.d/cpufreq.chart.py) | 12
-rw-r--r--  collectors/python.d.plugin/cpuidle/cpuidle.chart.py (renamed from python.d/cpuidle.chart.py) | 8
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py (renamed from python.d/dns_query_time.chart.py) | 27
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.chart.py (renamed from python.d/dovecot.chart.py) | 87
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py (renamed from python.d/elasticsearch.chart.py) | 220
-rw-r--r--  collectors/python.d.plugin/example/example.chart.py (renamed from python.d/example.chart.py) | 3
-rw-r--r--  collectors/python.d.plugin/exim/exim.chart.py (renamed from python.d/exim.chart.py) | 8
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.chart.py (renamed from python.d/freeradius.chart.py) | 24
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.chart.py (renamed from python.d/go_expvar.chart.py) | 22
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.chart.py (renamed from python.d/haproxy.chart.py) | 243
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.chart.py (renamed from python.d/httpcheck.chart.py) | 18
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.chart.py (renamed from python.d/icecast.chart.py) | 15
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.chart.py (renamed from python.d/ipfs.chart.py) | 60
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py (renamed from python.d/isc_dhcpd.chart.py) | 20
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.chart.py (renamed from python.d/memcached.chart.py) | 61
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.chart.py (renamed from python.d/mongodb.chart.py) | 150
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.chart.py (renamed from python.d/mysql.chart.py) | 451
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.chart.py (renamed from python.d/nginx.chart.py) | 27
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py (renamed from python.d/nginx_plus.chart.py) | 107
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.chart.py (renamed from python.d/nsd.chart.py) | 45
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.chart.py (renamed from python.d/ntpd.chart.py) | 80
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py (renamed from python.d/ovpn_status_log.chart.py) | 27
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.chart.py (renamed from python.d/phpfpm.chart.py) | 20
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.chart.py (renamed from python.d/portcheck.chart.py) | 12
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.chart.py (renamed from python.d/postfix.chart.py) | 15
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.chart.py (renamed from python.d/postgres.chart.py) | 593
-rw-r--r--  collectors/python.d.plugin/python_modules/__init__.py (renamed from python.d/python_modules/third_party/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py (renamed from python.d/python_modules/bases/FrameworkServices/ExecutableService.py) | 9
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py (renamed from python.d/python_modules/bases/FrameworkServices/LogService.py) | 2
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py (renamed from python.d/python_modules/bases/FrameworkServices/MySQLService.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py (renamed from python.d/python_modules/bases/FrameworkServices/SimpleService.py) | 13
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py (renamed from python.d/python_modules/bases/FrameworkServices/SocketService.py) | 64
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py (renamed from python.d/python_modules/bases/FrameworkServices/UrlService.py) | 32
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py (renamed from python.d/python_modules/urllib3/contrib/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/__init__.py (renamed from python.d/python_modules/urllib3/contrib/_securetransport/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/charts.py (renamed from python.d/python_modules/bases/charts.py) | 18
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/collection.py (renamed from python.d/python_modules/bases/collection.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loaders.py (renamed from python.d/python_modules/bases/loaders.py) | 21
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loggers.py (renamed from python.d/python_modules/bases/loggers.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/__init__.py (renamed from python.d/python_modules/pyyaml2/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/composer.py (renamed from python.d/python_modules/pyyaml2/composer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/constructor.py (renamed from python.d/python_modules/pyyaml2/constructor.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py (renamed from python.d/python_modules/pyyaml2/cyaml.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/dumper.py (renamed from python.d/python_modules/pyyaml2/dumper.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/emitter.py (renamed from python.d/python_modules/pyyaml2/emitter.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/error.py (renamed from python.d/python_modules/pyyaml2/error.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/events.py (renamed from python.d/python_modules/pyyaml2/events.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/loader.py (renamed from python.d/python_modules/pyyaml2/loader.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/nodes.py (renamed from python.d/python_modules/pyyaml2/nodes.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/parser.py (renamed from python.d/python_modules/pyyaml2/parser.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/reader.py (renamed from python.d/python_modules/pyyaml2/reader.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/representer.py (renamed from python.d/python_modules/pyyaml2/representer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/resolver.py (renamed from python.d/python_modules/pyyaml2/resolver.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/scanner.py (renamed from python.d/python_modules/pyyaml2/scanner.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/serializer.py (renamed from python.d/python_modules/pyyaml2/serializer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/tokens.py (renamed from python.d/python_modules/pyyaml2/tokens.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/__init__.py (renamed from python.d/python_modules/pyyaml3/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/composer.py (renamed from python.d/python_modules/pyyaml3/composer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/constructor.py (renamed from python.d/python_modules/pyyaml3/constructor.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py (renamed from python.d/python_modules/pyyaml3/cyaml.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/dumper.py (renamed from python.d/python_modules/pyyaml3/dumper.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/emitter.py (renamed from python.d/python_modules/pyyaml3/emitter.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/error.py (renamed from python.d/python_modules/pyyaml3/error.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/events.py (renamed from python.d/python_modules/pyyaml3/events.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/loader.py (renamed from python.d/python_modules/pyyaml3/loader.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/nodes.py (renamed from python.d/python_modules/pyyaml3/nodes.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/parser.py (renamed from python.d/python_modules/pyyaml3/parser.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/reader.py (renamed from python.d/python_modules/pyyaml3/reader.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/representer.py (renamed from python.d/python_modules/pyyaml3/representer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/resolver.py (renamed from python.d/python_modules/pyyaml3/resolver.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/scanner.py (renamed from python.d/python_modules/pyyaml3/scanner.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/serializer.py (renamed from python.d/python_modules/pyyaml3/serializer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/tokens.py (renamed from python.d/python_modules/pyyaml3/tokens.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/__init__.py (renamed from python.d/python_modules/urllib3/packages/backports/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/lm_sensors.py (renamed from python.d/python_modules/third_party/lm_sensors.py) | 3
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/ordereddict.py (renamed from python.d/python_modules/third_party/ordereddict.py) | 20
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/__init__.py (renamed from python.d/python_modules/urllib3/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/_collections.py (renamed from python.d/python_modules/urllib3/_collections.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/connection.py (renamed from python.d/python_modules/urllib3/connection.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/connectionpool.py (renamed from python.d/python_modules/urllib3/connectionpool.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py (renamed from python.d/python_modules/urllib3/contrib/_securetransport/bindings.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py (renamed from python.d/python_modules/urllib3/contrib/_securetransport/low_level.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py (renamed from python.d/python_modules/urllib3/contrib/appengine.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py (renamed from python.d/python_modules/urllib3/contrib/ntlmpool.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py (renamed from python.d/python_modules/urllib3/contrib/pyopenssl.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py (renamed from python.d/python_modules/urllib3/contrib/securetransport.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py (renamed from python.d/python_modules/urllib3/contrib/socks.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/exceptions.py (renamed from python.d/python_modules/urllib3/exceptions.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/fields.py (renamed from python.d/python_modules/urllib3/fields.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/filepost.py (renamed from python.d/python_modules/urllib3/filepost.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py (renamed from python.d/python_modules/urllib3/packages/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py (renamed from python.d/python_modules/urllib3/packages/backports/makefile.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py (renamed from python.d/python_modules/urllib3/packages/ordered_dict.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/six.py (renamed from python.d/python_modules/urllib3/packages/six.py) | 18
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py (renamed from python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py (renamed from python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py) | 3
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/poolmanager.py (renamed from python.d/python_modules/urllib3/poolmanager.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/request.py (renamed from python.d/python_modules/urllib3/request.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/response.py (renamed from python.d/python_modules/urllib3/response.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/__init__.py (renamed from python.d/python_modules/urllib3/util/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/connection.py (renamed from python.d/python_modules/urllib3/util/connection.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/request.py (renamed from python.d/python_modules/urllib3/util/request.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/response.py (renamed from python.d/python_modules/urllib3/util/response.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/retry.py (renamed from python.d/python_modules/urllib3/util/retry.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/selectors.py (renamed from python.d/python_modules/urllib3/util/selectors.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py (renamed from python.d/python_modules/urllib3/util/ssl_.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/timeout.py (renamed from python.d/python_modules/urllib3/util/timeout.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/url.py (renamed from python.d/python_modules/urllib3/util/url.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/wait.py (renamed from python.d/python_modules/urllib3/util/wait.py) | 1
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py (renamed from python.d/rabbitmq.chart.py) | 112
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.chart.py (renamed from python.d/retroshare.chart.py) | 10
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.chart.py (renamed from python.d/sensors.chart.py) | 29
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.chart.py (renamed from python.d/smartd_log.chart.py) | 9
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.chart.py (renamed from python.d/springboot.chart.py) | 96
-rw-r--r--  collectors/python.d.plugin/squid/squid.chart.py (renamed from python.d/squid.chart.py) | 75
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.chart.py (renamed from python.d/tomcat.chart.py) | 110
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.chart.py (renamed from python.d/traefik.chart.py) | 40
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.chart.py (renamed from python.d/varnish.chart.py) | 29
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.chart.py (renamed from python.d/web_log.chart.py) | 359
-rw-r--r--  python.d/Makefile.in | 1107
-rw-r--r--  python.d/README.md | 2363
-rw-r--r--  python.d/dnsdist.chart.py | 101
-rw-r--r--  python.d/fail2ban.chart.py | 213
-rw-r--r--  python.d/hddtemp.chart.py | 110
-rw-r--r--  python.d/mdstat.chart.py | 189
-rw-r--r--  python.d/powerdns.chart.py | 58
-rw-r--r--  python.d/python-modules-installer.sh | 158
-rw-r--r--  python.d/python_modules/__init__.py | 1
-rw-r--r--  python.d/python_modules/base.py | 9
-rw-r--r--  python.d/redis.chart.py | 200
-rw-r--r--  python.d/samba.chart.py | 126
140 files changed, 2499 insertions, 6115 deletions
diff --git a/python.d/python_modules/bases/FrameworkServices/__init__.py b/collectors/charts.d.plugin/apcupsd/README.md
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/bases/FrameworkServices/__init__.py
+++ b/collectors/charts.d.plugin/apcupsd/README.md
diff --git a/python.d/python_modules/bases/__init__.py b/collectors/charts.d.plugin/opensips/README.md
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/bases/__init__.py
+++ b/collectors/charts.d.plugin/opensips/README.md
diff --git a/python.d/Makefile.am b/collectors/python.d.plugin/Makefile.am
index a5fcc7394..5f214e436 100644
--- a/python.d/Makefile.am
+++ b/collectors/python.d.plugin/Makefile.am
@@ -1,73 +1,110 @@
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
CLEANFILES = \
- python-modules-installer.sh \
+ python.d.plugin \
$(NULL)
include $(top_srcdir)/build/subst.inc
-
SUFFIXES = .in
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ python.d.plugin.in \
+ README.md \
+ $(NULL)
+
dist_python_SCRIPTS = \
- python-modules-installer.sh \
$(NULL)
dist_python_DATA = \
- README.md \
- apache.chart.py \
- beanstalk.chart.py \
- bind_rndc.chart.py \
- ceph.chart.py \
- chrony.chart.py \
- couchdb.chart.py \
- cpufreq.chart.py \
- cpuidle.chart.py \
- dns_query_time.chart.py \
- dnsdist.chart.py \
- dovecot.chart.py \
- elasticsearch.chart.py \
- example.chart.py \
- exim.chart.py \
- fail2ban.chart.py \
- freeradius.chart.py \
- go_expvar.chart.py \
- haproxy.chart.py \
- hddtemp.chart.py \
- httpcheck.chart.py \
- icecast.chart.py \
- ipfs.chart.py \
- isc_dhcpd.chart.py \
- mdstat.chart.py \
- memcached.chart.py \
- mongodb.chart.py \
- mysql.chart.py \
- nginx.chart.py \
- nginx_plus.chart.py \
- nsd.chart.py \
- ntpd.chart.py \
- ovpn_status_log.chart.py \
- phpfpm.chart.py \
- portcheck.chart.py \
- postfix.chart.py \
- postgres.chart.py \
- powerdns.chart.py \
- rabbitmq.chart.py \
- redis.chart.py \
- retroshare.chart.py \
- samba.chart.py \
- sensors.chart.py \
- springboot.chart.py \
- squid.chart.py \
- smartd_log.chart.py \
- tomcat.chart.py \
- traefik.chart.py \
- varnish.chart.py \
- web_log.chart.py \
$(NULL)
+userpythonconfigdir=$(configdir)/python.d
+dist_userpythonconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+pythonconfigdir=$(libconfigdir)/python.d
+dist_pythonconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+include adaptec_raid/Makefile.inc
+include apache/Makefile.inc
+include beanstalk/Makefile.inc
+include bind_rndc/Makefile.inc
+include boinc/Makefile.inc
+include ceph/Makefile.inc
+include chrony/Makefile.inc
+include couchdb/Makefile.inc
+include cpufreq/Makefile.inc
+include cpuidle/Makefile.inc
+include dnsdist/Makefile.inc
+include dns_query_time/Makefile.inc
+include dockerd/Makefile.inc
+include dovecot/Makefile.inc
+include elasticsearch/Makefile.inc
+include example/Makefile.inc
+include exim/Makefile.inc
+include fail2ban/Makefile.inc
+include freeradius/Makefile.inc
+include go_expvar/Makefile.inc
+include haproxy/Makefile.inc
+include hddtemp/Makefile.inc
+include httpcheck/Makefile.inc
+include icecast/Makefile.inc
+include ipfs/Makefile.inc
+include isc_dhcpd/Makefile.inc
+include linux_power_supply/Makefile.inc
+include litespeed/Makefile.inc
+include logind/Makefile.inc
+include mdstat/Makefile.inc
+include megacli/Makefile.inc
+include memcached/Makefile.inc
+include mongodb/Makefile.inc
+include monit/Makefile.inc
+include mysql/Makefile.inc
+include nginx/Makefile.inc
+include nginx_plus/Makefile.inc
+include nsd/Makefile.inc
+include ntpd/Makefile.inc
+include ovpn_status_log/Makefile.inc
+include phpfpm/Makefile.inc
+include portcheck/Makefile.inc
+include postfix/Makefile.inc
+include postgres/Makefile.inc
+include powerdns/Makefile.inc
+include proxysql/Makefile.inc
+include puppet/Makefile.inc
+include rabbitmq/Makefile.inc
+include redis/Makefile.inc
+include rethinkdbs/Makefile.inc
+include retroshare/Makefile.inc
+include samba/Makefile.inc
+include sensors/Makefile.inc
+include smartd_log/Makefile.inc
+include spigotmc/Makefile.inc
+include springboot/Makefile.inc
+include squid/Makefile.inc
+include tomcat/Makefile.inc
+include traefik/Makefile.inc
+include unbound/Makefile.inc
+include uwsgi/Makefile.inc
+include varnish/Makefile.inc
+include w1sensor/Makefile.inc
+include web_log/Makefile.inc
+
pythonmodulesdir=$(pythondir)/python_modules
dist_pythonmodules_DATA = \
python_modules/__init__.py \
- python_modules/base.py \
$(NULL)
basesdir=$(pythonmodulesdir)/bases
@@ -95,6 +132,9 @@ dist_third_party_DATA = \
python_modules/third_party/__init__.py \
python_modules/third_party/ordereddict.py \
python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
$(NULL)
pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
diff --git a/python.d/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
index 789b3c099..d136274d0 100644
--- a/python.d/apache.chart.py
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: apache netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.UrlService import UrlService
@@ -25,63 +26,65 @@ CHARTS = {
'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
'statistics', 'apache.bytesperreq', 'area'],
'lines': [
- ["size_req"]
+ ['size_req']
]},
'workers': {
'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
'lines': [
- ["idle"],
- ["busy"],
+ ['idle'],
+ ['busy'],
]},
'reqpersec': {
'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
'apache.reqpersec', 'area'],
'lines': [
- ["requests_sec"]
+ ['requests_sec']
]},
'bytespersec': {
'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
'apache.bytesperreq', 'area'],
'lines': [
- ["size_sec", None, 'absolute', 8, 1000]
+ ['size_sec', None, 'absolute', 8, 1000]
]},
'requests': {
'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
'lines': [
- ["requests", None, 'incremental']
+ ['requests', None, 'incremental']
]},
'net': {
'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
'lines': [
- ["sent", None, 'incremental', 8, 1]
+ ['sent', None, 'incremental', 8, 1]
]},
'connections': {
'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
'lines': [
- ["connections"]
+ ['connections']
]},
'conns_async': {
'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
'lines': [
- ["keepalive"],
- ["closing"],
- ["writing"]
+ ['keepalive'],
+ ['closing'],
+ ['writing']
]}
}
-ASSIGNMENT = {"BytesPerReq": 'size_req',
- "IdleWorkers": 'idle',
- "IdleServers": 'idle_servers',
- "BusyWorkers": 'busy',
- "BusyServers": 'busy_servers',
- "ReqPerSec": 'requests_sec',
- "BytesPerSec": 'size_sec',
- "Total Accesses": 'requests',
- "Total kBytes": 'sent',
- "ConnsTotal": 'connections',
- "ConnsAsyncKeepAlive": 'keepalive',
- "ConnsAsyncClosing": 'closing',
- "ConnsAsyncWriting": 'writing'}
+ASSIGNMENT = {
+ 'BytesPerReq': 'size_req',
+ 'IdleWorkers': 'idle',
+ 'IdleServers': 'idle_servers',
+ 'BusyWorkers': 'busy',
+ 'BusyServers': 'busy_servers',
+ 'ReqPerSec': 'requests_sec',
+ 'BytesPerSec': 'size_sec',
+ 'Total Accesses': 'requests',
+ 'Total kBytes': 'sent',
+ 'ConnsTotal': 'connections',
+ 'ConnsAsyncKeepAlive': 'keepalive',
+ 'ConnsAsyncClosing': 'closing',
+ 'ConnsAsyncWriting': 'writing'
+}
class Service(UrlService):
@@ -102,8 +105,8 @@ class Service(UrlService):
for chart in self.definitions:
if chart == 'workers':
lines = self.definitions[chart]['lines']
- lines[0] = ["idle_servers", 'idle']
- lines[1] = ["busy_servers", 'busy']
+ lines[0] = ['idle_servers', 'idle']
+ lines[1] = ['busy_servers', 'busy']
opts = self.definitions[chart]['options']
opts[1] = opts[1].replace('apache', 'lighttpd')
opts[4] = opts[4].replace('apache', 'lighttpd')
@@ -120,7 +123,7 @@ class Service(UrlService):
data = dict()
for row in raw_data.split('\n'):
- tmp = row.split(":")
+ tmp = row.split(':')
if tmp[0] in ASSIGNMENT:
try:
data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
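Note on the parse loop above: mod_status reports some counters as floats (e.g. ReqPerSec) while netdata dimensions are integers, which is why the module keeps the int(float(...)) two-step. A minimal standalone sketch of the same parsing; the sample input is illustrative, not real server output:

    # Parse 'key: value' rows the way apache.chart.py does; int(float(x))
    # accepts both integer strings ('845') and float strings ('4.28').
    raw_data = 'Total Accesses: 845\nReqPerSec: 4.28\nBusyWorkers: 3'
    assignment = {'Total Accesses': 'requests', 'ReqPerSec': 'requests_sec', 'BusyWorkers': 'busy'}
    data = {}
    for row in raw_data.split('\n'):
        tmp = row.split(':')
        if tmp[0] in assignment:
            data[assignment[tmp[0]]] = int(float(tmp[1]))
    print(data)  # {'requests': 845, 'requests_sec': 4, 'busy': 3}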
diff --git a/python.d/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
index 8880afdd9..1472b4e1a 100644
--- a/python.d/beanstalk.chart.py
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: beanstalk netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
try:
import beanstalkc
@@ -8,13 +9,8 @@ try:
except ImportError:
BEANSTALKC = False
-try:
- import yaml
- YAML = True
-except ImportError:
- YAML = False
-
from bases.FrameworkServices.SimpleService import SimpleService
+from bases.loaders import safe_load
# default module values (can be overridden per job in `config`)
# update_every = 2
@@ -114,12 +110,13 @@ CHARTS = {
def tube_chart_template(name):
- order = ['{0}_jobs_rate'.format(name),
- '{0}_jobs'.format(name),
- '{0}_connections'.format(name),
- '{0}_commands'.format(name),
- '{0}_pause'.format(name)
- ]
+ order = [
+ '{0}_jobs_rate'.format(name),
+ '{0}_jobs'.format(name),
+ '{0}_connections'.format(name),
+ '{0}_commands'.format(name),
+ '{0}_pause'.format(name)
+ ]
family = 'tube {0}'.format(name)
charts = {
@@ -127,7 +124,8 @@ def tube_chart_template(name):
'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'],
'lines': [
['_'.join([name, 'total-jobs']), 'jobs', 'incremental']
- ]},
+ ]
+ },
order[1]: {
'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'],
'lines': [
@@ -136,27 +134,30 @@ def tube_chart_template(name):
['_'.join([name, 'current-jobs-reserved']), 'reserved'],
['_'.join([name, 'current-jobs-delayed']), 'delayed'],
['_'.join([name, 'current-jobs-buried']), 'buried']
- ]},
+ ]
+ },
order[2]: {
'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'],
'lines': [
['_'.join([name, 'current-using']), 'using'],
['_'.join([name, 'current-waiting']), 'waiting'],
['_'.join([name, 'current-watching']), 'watching']
- ]},
+ ]
+ },
order[3]: {
'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'],
'lines': [
['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'],
['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental']
- ]},
+ ]
+ },
order[4]: {
'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'],
'lines': [
['_'.join([name, 'pause']), 'since'],
['_'.join([name, 'pause-time-left']), 'left']
- ]}
-
+ ]
+ }
}
return order, charts
@@ -176,10 +177,6 @@ class Service(SimpleService):
self.error("'beanstalkc' module is needed to use beanstalk.chart.py")
return False
- if not YAML:
- self.error("'yaml' module is needed to use beanstalk.chart.py")
- return False
-
self.conn = self.connect()
return True if self.conn else False
@@ -231,7 +228,7 @@ class Service(SimpleService):
return beanstalkc.Connection(host=host,
port=port,
connect_timeout=timeout,
- parse_yaml=yaml.load)
+ parse_yaml=safe_load)
except beanstalkc.SocketError as error:
self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error))
return None
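The beanstalk change above drops the direct yaml import in favor of the shared safe_load from bases.loaders. A minimal sketch of why that matters, assuming PyYAML is installed: yaml.load with the old default loader can construct arbitrary Python objects from tagged YAML, which is risky for data read off a network socket, while safe_load only builds plain types.

    import yaml

    print(yaml.safe_load('current-jobs-ready: 5'))  # {'current-jobs-ready': 5}
    try:
        # A tagged document like this would execute code under plain
        # yaml.load on old PyYAML; safe_load refuses to construct it.
        yaml.safe_load("!!python/object/apply:os.system ['echo hi']")
    except yaml.YAMLError as err:
        print('rejected unsafe tag:', err)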
diff --git a/python.d/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
index cc96659b2..423232f65 100644
--- a/python.d/bind_rndc.chart.py
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: bind rndc netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
@@ -35,56 +36,50 @@ CHARTS = {
['nms_dropped_queries', 'dropped_queries', 'incremental'],
]},
'incoming_queries': {
- 'options': [None, 'Incoming Queries', 'queries', 'incoming queries',
- 'bind_rndc.incoming_queries', 'line'],
+ 'options': [None, 'Incoming Queries', 'queries', 'incoming queries', 'bind_rndc.incoming_queries', 'line'],
'lines': [
]},
'outgoing_queries': {
- 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries',
- 'bind_rndc.outgoing_queries', 'line'],
+ 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries', 'bind_rndc.outgoing_queries', 'line'],
'lines': [
]},
'named_stats_size': {
- 'options': [None, 'Named Stats File Size', 'MB', 'file size',
- 'bind_rndc.stats_size', 'line'],
+ 'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'],
'lines': [
['stats_size', None, 'absolute', 1, 1 << 20]
- ]}
+ ]
+ }
}
NMS = {
- 'nms_requests':
- ['IPv4 requests received',
- 'IPv6 requests received',
- 'TCP requests received',
- 'requests with EDNS(0) receive'],
- 'nms_responses':
- ['responses sent',
- 'truncated responses sent',
- 'responses with EDNS(0) sent',
- 'requests with unsupported EDNS version received'],
- 'nms_failure':
- ['other query failures',
- 'queries resulted in SERVFAIL'],
- 'nms_auth_answer':
- ['queries resulted in authoritative answer'],
- 'nms_non_auth_answer':
- ['queries resulted in non authoritative answer'],
- 'nms_nxrrset':
- ['queries resulted in nxrrset'],
- 'nms_success':
- ['queries resulted in successful answer'],
- 'nms_nxdomain':
- ['queries resulted in NXDOMAIN'],
- 'nms_recursion':
- ['queries caused recursion'],
- 'nms_duplicate':
- ['duplicate queries received'],
- 'nms_rejected_queries':
- ['auth queries rejected',
- 'recursive queries rejected'],
- 'nms_dropped_queries':
- ['queries dropped']
+ 'nms_requests': [
+ 'IPv4 requests received',
+ 'IPv6 requests received',
+ 'TCP requests received',
+ 'requests with EDNS(0) receive'
+ ],
+ 'nms_responses': [
+ 'responses sent',
+ 'truncated responses sent',
+ 'responses with EDNS(0) sent',
+ 'requests with unsupported EDNS version received'
+ ],
+ 'nms_failure': [
+ 'other query failures',
+ 'queries resulted in SERVFAIL'
+ ],
+ 'nms_auth_answer': ['queries resulted in authoritative answer'],
+ 'nms_non_auth_answer': ['queries resulted in non authoritative answer'],
+ 'nms_nxrrset': ['queries resulted in nxrrset'],
+ 'nms_success': ['queries resulted in successful answer'],
+ 'nms_nxdomain': ['queries resulted in NXDOMAIN'],
+ 'nms_recursion': ['queries caused recursion'],
+ 'nms_duplicate': ['duplicate queries received'],
+ 'nms_rejected_queries': [
+ 'auth queries rejected',
+ 'recursive queries rejected'
+ ],
+ 'nms_dropped_queries': ['queries dropped']
}
STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
@@ -215,7 +210,9 @@ def parse_stats(field, named_stats):
if '[' in line:
continue
v, k = line.strip().split(' ', 1)
- data[k] = int(v)
+ if k not in data:
+ data[k] = 0
+ data[k] += int(v)
continue
break
break
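The parse_stats change above makes repeated counter names accumulate instead of letting the last occurrence silently overwrite earlier ones. The same aggregation pattern in isolation; the sample lines are illustrative, not real named.stats output:

    # Sum values for duplicate keys rather than keeping only the last one.
    lines = ['10 queries resulted in SERVFAIL', '3 queries resulted in SERVFAIL']
    data = {}
    for line in lines:
        v, k = line.strip().split(' ', 1)
        data[k] = data.get(k, 0) + int(v)
    print(data)  # {'queries resulted in SERVFAIL': 13}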
diff --git a/python.d/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
index fb78397d0..31c764d0f 100644
--- a/python.d/ceph.chart.py
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: ceph netdata python.d module
# Author: Luis Eduardo (lets00)
+# SPDX-License-Identifier: GPL-3.0-or-later
try:
import rados
@@ -8,6 +9,7 @@ try:
except ImportError:
CEPH = False
+import os
import json
from bases.FrameworkServices.SimpleService import SimpleService
@@ -16,17 +18,29 @@ update_every = 10
priority = 60000
retries = 60
-ORDER = ['general_usage', 'general_objects', 'general_bytes', 'general_operations',
- 'general_latency', 'pool_usage', 'pool_objects', 'pool_read_bytes',
- 'pool_write_bytes', 'pool_read_operations', 'pool_write_operations', 'osd_usage',
- 'osd_apply_latency', 'osd_commit_latency']
+ORDER = [
+ 'general_usage',
+ 'general_objects',
+ 'general_bytes',
+ 'general_operations',
+ 'general_latency',
+ 'pool_usage',
+ 'pool_objects',
+ 'pool_read_bytes',
+ 'pool_write_bytes',
+ 'pool_read_operations',
+ 'pool_write_operations',
+ 'osd_usage',
+ 'osd_apply_latency',
+ 'osd_commit_latency'
+]
CHARTS = {
'general_usage': {
'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'],
'lines': [
- ['general_available', 'avail', 'absolute', 1, 1024],
- ['general_usage', 'used', 'absolute', 1, 1024]
+ ['general_available', 'avail', 'absolute'],
+ ['general_usage', 'used', 'absolute']
]
},
'general_objects': {
@@ -118,6 +132,20 @@ class Service(SimpleService):
if not (self.config_file and self.keyring_file):
self.error('config_file and/or keyring_file is not defined')
return False
+
+ # Verify files and permissions
+ if not (os.access(self.config_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.keyring_file))
+ return False
+ if not (os.access(self.config_file, os.R_OK)):
+ self.error('Ceph plugin does not read {0}, define read permission.'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.R_OK)):
+ self.error('Ceph plugin does not read {0}, define read permission.'.format(self.keyring_file))
+ return False
try:
self.cluster = rados.Rados(conffile=self.config_file,
conf=dict(keyring=self.keyring_file))
@@ -148,11 +176,11 @@ class Service(SimpleService):
pool['name'],
'absolute', 1, 1024])
self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
+ pool['name'],
+ 'absolute'])
self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
+ pool['name'],
+ 'absolute'])
# OSD lines
for osd in sorted(self._get_osd_df()['nodes']):
@@ -214,16 +242,17 @@ class Service(SimpleService):
apply_latency += perf['perf_stats']['apply_latency_ms']
commit_latency += perf['perf_stats']['commit_latency_ms']
- return {'general_usage': int(status['kb_used']),
- 'general_available': int(status['kb_avail']),
- 'general_objects': int(status['num_objects']),
- 'general_read_bytes': read_bytes_sec,
- 'general_write_bytes': write_bytes_sec,
- 'general_read_operations': read_op_per_sec,
- 'general_write_operations': write_op_per_sec,
- 'general_apply_latency': apply_latency,
- 'general_commit_latency': commit_latency
- }
+ return {
+ 'general_usage': int(status['kb_used']),
+ 'general_available': int(status['kb_avail']),
+ 'general_objects': int(status['num_objects']),
+ 'general_read_bytes': read_bytes_sec,
+ 'general_write_bytes': write_bytes_sec,
+ 'general_read_operations': read_op_per_sec,
+ 'general_write_operations': write_op_per_sec,
+ 'general_apply_latency': apply_latency,
+ 'general_commit_latency': commit_latency
+ }
@staticmethod
def _get_pool_usage(pool):
@@ -247,11 +276,12 @@ class Service(SimpleService):
Get read/write kb and operations in a pool
:return: A pool dict with both read/write bytes and operations.
"""
- return {'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
- 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
- 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
- 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
- }
+ return {
+ 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
+ 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
+ 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
+ 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
+ }
@staticmethod
def _get_osd_usage(osd):
@@ -267,8 +297,10 @@ class Service(SimpleService):
Get ceph osd apply and commit latency
:return: A osd dict with osd name's key with both apply and commit latency values
"""
- return {'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
- 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']}
+ return {
+ 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
+ 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']
+ }
def _get_df(self):
"""
diff --git a/python.d/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py
index 8f331fa50..fd01d4e85 100644
--- a/python.d/chrony.chart.py
+++ b/collectors/python.d.plugin/chrony/chrony.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: chrony netdata python.d module
# Author: Dominik Schloesser (domschl)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.ExecutableService import ExecutableService
@@ -13,66 +14,70 @@ retries = 10
ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
CHARTS = {
- # id: {
- # 'options': [name, title, units, family, context, charttype],
- # 'lines': [
- # [unique_dimension_name, name, algorithm, multiplier, divisor]
- # ]}
'system': {
- 'options': [None, "Chrony System Time Deltas", "microseconds", 'system', 'chrony.system', 'area'],
+ 'options': [None, 'Chrony System Time Deltas', 'microseconds', 'system', 'chrony.system', 'area'],
'lines': [
- ['timediff', 'system time', 'absolute', 1, 1000]
- ]},
+ ['timediff', 'system time', 'absolute', 1, 1000]
+ ]
+ },
'offsets': {
- 'options': [None, "Chrony System Time Offsets", "microseconds", 'system', 'chrony.offsets', 'area'],
+ 'options': [None, 'Chrony System Time Offsets', 'microseconds', 'system', 'chrony.offsets', 'area'],
'lines': [
['lastoffset', 'last offset', 'absolute', 1, 1000],
- ['rmsoffset', 'RMS offset', 'absolute', 1, 1000]
- ]},
+ ['rmsoffset', 'RMS offset', 'absolute', 1, 1000]
+ ]
+ },
'stratum': {
- 'options': [None, "Chrony Stratum", "stratum", 'root', 'chrony.stratum', 'line'],
+ 'options': [None, 'Chrony Stratum', 'stratum', 'root', 'chrony.stratum', 'line'],
'lines': [
['stratum', None, 'absolute', 1, 1]
- ]},
+ ]
+ },
'root': {
- 'options': [None, "Chrony Root Delays", "milliseconds", 'root', 'chrony.root', 'line'],
+ 'options': [None, 'Chrony Root Delays', 'milliseconds', 'root', 'chrony.root', 'line'],
'lines': [
- ['rootdelay', 'delay', 'absolute', 1, 1000000],
+ ['rootdelay', 'delay', 'absolute', 1, 1000000],
['rootdispersion', 'dispersion', 'absolute', 1, 1000000]
- ]},
+ ]
+ },
'frequency': {
- 'options': [None, "Chrony Frequency", "ppm", 'frequencies', 'chrony.frequency', 'area'],
+ 'options': [None, 'Chrony Frequency', 'ppm', 'frequencies', 'chrony.frequency', 'area'],
'lines': [
['frequency', None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'residualfreq': {
- 'options': [None, "Chrony Residual frequency", "ppm", 'frequencies', 'chrony.residualfreq', 'area'],
+ 'options': [None, 'Chrony Residual frequency', 'ppm', 'frequencies', 'chrony.residualfreq', 'area'],
'lines': [
['residualfreq', 'residual frequency', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'skew': {
- 'options': [None, "Chrony Skew, error bound on frequency", "ppm", 'frequencies', 'chrony.skew', 'area'],
+ 'options': [None, 'Chrony Skew, error bound on frequency', 'ppm', 'frequencies', 'chrony.skew', 'area'],
'lines': [
['skew', None, 'absolute', 1, 1000]
- ]}
+ ]
+ }
}
-CHRONY = [('Frequency', 'frequency', 1e3),
- ('Last offset', 'lastoffset', 1e9),
- ('RMS offset', 'rmsoffset', 1e9),
- ('Residual freq', 'residualfreq', 1e3),
- ('Root delay', 'rootdelay', 1e9),
- ('Root dispersion', 'rootdispersion', 1e9),
- ('Skew', 'skew', 1e3),
- ('Stratum', 'stratum', 1),
- ('System time', 'timediff', 1e9)]
+CHRONY = [
+ ('Frequency', 'frequency', 1e3),
+ ('Last offset', 'lastoffset', 1e9),
+ ('RMS offset', 'rmsoffset', 1e9),
+ ('Residual freq', 'residualfreq', 1e3),
+ ('Root delay', 'rootdelay', 1e9),
+ ('Root dispersion', 'rootdispersion', 1e9),
+ ('Skew', 'skew', 1e3),
+ ('Stratum', 'stratum', 1),
+ ('System time', 'timediff', 1e9)
+]
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(
self, configuration=configuration, name=name)
- self.command = "chronyc -n tracking"
+ self.command = 'chronyc -n tracking'
self.order = ORDER
self.definitions = CHARTS
diff --git a/python.d/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
index 558bac587..5d6b9916f 100644
--- a/python.d/couchdb.chart.py
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
@@ -2,6 +2,7 @@
# Description: couchdb netdata python.d module
# Author: wohali <wohali@apache.org>
# Thanks to l2isbad for good examples :)
+# SPDX-License-Identifier: GPL-3.0-or-later
from collections import namedtuple, defaultdict
from json import loads
@@ -24,7 +25,7 @@ METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
OVERVIEW_STATS = [
'couchdb.database_reads.value',
'couchdb.database_writes.value',
- 'couchdb.httpd.view_reads.value'
+ 'couchdb.httpd.view_reads.value',
'couchdb.httpd_request_methods.COPY.value',
'couchdb.httpd_request_methods.DELETE.value',
'couchdb.httpd_request_methods.GET.value',
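The one-character couchdb fix above is a real bug fix: without the trailing comma, Python implicitly concatenates the two adjacent string literals, merging two stats keys into one bogus entry. Demonstrated in isolation:

    # Adjacent string literals concatenate, so a missing comma silently
    # turns two list items into one.
    stats = [
        'couchdb.database_reads.value',
        'couchdb.httpd.view_reads.value'   # <- missing comma
        'couchdb.httpd_request_methods.COPY.value',
    ]
    print(len(stats))  # 2, not 3
    print(stats[1])    # 'couchdb.httpd.view_reads.valuecouchdb.httpd_request_methods.COPY.value'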
diff --git a/python.d/cpufreq.chart.py b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
index 3abde736c..cbbab6d7f 100644
--- a/python.d/cpufreq.chart.py
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description: cpufreq netdata python.d module
-# Author: Pawel Krupa (paulfantom) and Steven Noonan (tycho)
+# Author: Pawel Krupa (paulfantom)
+# Author: Steven Noonan (tycho)
+# SPDX-License-Identifier: GPL-3.0-or-later
import glob
import os
@@ -17,7 +19,8 @@ CHARTS = {
'options': [None, 'CPU Clock', 'MHz', 'cpufreq', 'cpufreq.cpufreq', 'line'],
'lines': [
# lines are created dynamically in `check()` method
- ]}
+ ]
+ }
}
@@ -92,7 +95,7 @@ class Service(SimpleService):
self.assignment[cpu]['accurate'] = path
self.accurate_last[cpu] = {}
- if len(self.assignment) == 0:
+ if not self.assignment:
self.accurate_exists = False
for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/scaling_cur_freq'):
@@ -102,7 +105,7 @@ class Service(SimpleService):
self.assignment[cpu] = {}
self.assignment[cpu]['inaccurate'] = path
- if len(self.assignment) == 0:
+ if not self.assignment:
self.error("couldn't find a method to read cpufreq statistics")
return False
@@ -110,4 +113,3 @@ class Service(SimpleService):
self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000])
return True
-
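The len(self.assignment) == 0 rewrites above (and the same change in the cpuidle diff below) use Python's truth-value idiom: empty containers are falsy, so "not container" is the conventional emptiness test.

    assignment = {}
    print(not assignment)   # True  (empty dict is falsy)
    assignment['cpu0'] = {}
    print(not assignment)   # False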
diff --git a/python.d/cpuidle.chart.py b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
index d14c6aaf3..feac025bf 100644
--- a/python.d/cpuidle.chart.py
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
@@ -1,14 +1,15 @@
# -*- coding: utf-8 -*-
# Description: cpuidle netdata python.d module
# Author: Steven Noonan (tycho)
+# SPDX-License-Identifier: GPL-3.0-or-later
+import ctypes
import glob
import os
import platform
from bases.FrameworkServices.SimpleService import SimpleService
-import ctypes
syscall = ctypes.CDLL('libc.so.6').syscall
# default module values (can be overridden per job in `config`)
@@ -107,7 +108,7 @@ class Service(SimpleService):
def check(self):
if self.__gettid() is None:
- self.error("Cannot get thread ID. Stats would be completely broken.")
+ self.error('Cannot get thread ID. Stats would be completely broken.')
return False
for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
@@ -140,9 +141,8 @@ class Service(SimpleService):
# Sort order by kernel-specified CPU index
self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
- if len(self.definitions) == 0:
+ if not self.definitions:
self.error("couldn't find cstate stats")
return False
return True
-
diff --git a/python.d/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
index 9a794a9c9..d3c3db788 100644
--- a/python.d/dns_query_time.chart.py
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
@@ -1,20 +1,25 @@
# -*- coding: utf-8 -*-
# Description: dns_query_time netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from random import choice
-from threading import Thread
from socket import getaddrinfo, gaierror
+from threading import Thread
try:
from time import monotonic as time
except ImportError:
from time import time
+
try:
- import dns.message, dns.query, dns.name
+ import dns.message
+ import dns.query
+ import dns.name
DNS_PYTHON = True
except ImportError:
DNS_PYTHON = False
+
try:
from queue import Queue
except ImportError:
@@ -117,8 +122,12 @@ def check_ns(ns):
def create_charts(aggregate, server_list):
if aggregate:
order = ['dns_group']
- definitions = {'dns_group': {'options': [None, 'DNS Response Time', 'ms', 'name servers',
- 'dns_query_time.response_time', 'line'], 'lines': []}}
+ definitions = {
+ 'dns_group': {
+ 'options': [None, 'DNS Response Time', 'ms', 'name servers', 'dns_query_time.response_time', 'line'],
+ 'lines': []
+ }
+ }
for ns in server_list:
definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
@@ -127,8 +136,10 @@ def create_charts(aggregate, server_list):
order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
definitions = dict()
for ns in server_list:
- definitions[''.join(['dns_', ns.replace('.', '_')])] = {'options': [None, 'DNS Response Time', 'ms', ns,
- 'dns_query_time.response_time', 'area'],
- 'lines': [['_'.join(['ns', ns.replace('.', '_')]),
- ns, 'absolute']]}
+ definitions[''.join(['dns_', ns.replace('.', '_')])] = {
+ 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
+ 'lines': [
+ ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']
+ ]
+ }
return order, definitions
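The reshuffled imports above keep the standard monotonic-clock fallback: time.monotonic exists only on Python 3.3+, so older interpreters fall back to wall-clock time.time for measuring query latency. The pattern on its own:

    # Prefer a monotonic clock for durations; wall-clock time can jump
    # (NTP steps, manual changes) and distort latency measurements.
    try:
        from time import monotonic as time
    except ImportError:
        from time import time

    start = time()
    # ... perform the DNS query ...
    elapsed_ms = (time() - start) * 1000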
diff --git a/python.d/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
index 5689f2ec9..7fee3bfac 100644
--- a/python.d/dovecot.chart.py
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: dovecot netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.SocketService import SocketService
@@ -10,93 +11,113 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['sessions', 'logins', 'commands',
- 'faults',
- 'context_switches',
- 'io', 'net', 'syscalls',
- 'lookup', 'cache',
- 'auth', 'auth_cache']
+ORDER = [
+ 'sessions',
+ 'logins',
+ 'commands',
+ 'faults',
+ 'context_switches',
+ 'io',
+ 'net',
+ 'syscalls',
+ 'lookup',
+ 'cache',
+ 'auth',
+ 'auth_cache'
+]
CHARTS = {
'sessions': {
- 'options': [None, "Dovecot Active Sessions", 'number', 'sessions', 'dovecot.sessions', 'line'],
+ 'options': [None, 'Dovecot Active Sessions', 'number', 'sessions', 'dovecot.sessions', 'line'],
'lines': [
['num_connected_sessions', 'active sessions', 'absolute']
- ]},
+ ]
+ },
'logins': {
- 'options': [None, "Dovecot Logins", 'number', 'logins', 'dovecot.logins', 'line'],
+ 'options': [None, 'Dovecot Logins', 'number', 'logins', 'dovecot.logins', 'line'],
'lines': [
['num_logins', 'logins', 'absolute']
- ]},
+ ]
+ },
'commands': {
- 'options': [None, "Dovecot Commands", "commands", 'commands', 'dovecot.commands', 'line'],
+ 'options': [None, 'Dovecot Commands', 'commands', 'commands', 'dovecot.commands', 'line'],
'lines': [
['num_cmds', 'commands', 'absolute']
- ]},
+ ]
+ },
'faults': {
- 'options': [None, "Dovecot Page Faults", "faults", 'page faults', 'dovecot.faults', 'line'],
+ 'options': [None, 'Dovecot Page Faults', 'faults', 'page faults', 'dovecot.faults', 'line'],
'lines': [
['min_faults', 'minor', 'absolute'],
['maj_faults', 'major', 'absolute']
- ]},
+ ]
+ },
'context_switches': {
- 'options': [None, "Dovecot Context Switches", '', 'context switches', 'dovecot.context_switches', 'line'],
+ 'options': [None, 'Dovecot Context Switches', '', 'context switches', 'dovecot.context_switches', 'line'],
'lines': [
['vol_cs', 'voluntary', 'absolute'],
['invol_cs', 'involuntary', 'absolute']
- ]},
+ ]
+ },
'io': {
- 'options': [None, "Dovecot Disk I/O", 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
+ 'options': [None, 'Dovecot Disk I/O', 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
'lines': [
['disk_input', 'read', 'incremental', 1, 1024],
['disk_output', 'write', 'incremental', -1, 1024]
- ]},
+ ]
+ },
'net': {
- 'options': [None, "Dovecot Network Bandwidth", 'kilobits/s', 'network', 'dovecot.net', 'area'],
+ 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
'lines': [
['read_bytes', 'read', 'incremental', 8, 1024],
['write_bytes', 'write', 'incremental', -8, 1024]
- ]},
+ ]
+ },
'syscalls': {
- 'options': [None, "Dovecot Number of SysCalls", 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
+ 'options': [None, 'Dovecot Number of SysCalls', 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
'lines': [
['read_count', 'read', 'incremental'],
['write_count', 'write', 'incremental']
- ]},
+ ]
+ },
'lookup': {
- 'options': [None, "Dovecot Lookups", 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
+ 'options': [None, 'Dovecot Lookups', 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
'lines': [
['mail_lookup_path', 'path', 'incremental'],
['mail_lookup_attr', 'attr', 'incremental']
- ]},
+ ]
+ },
'cache': {
- 'options': [None, "Dovecot Cache Hits", 'hits/s', 'cache', 'dovecot.cache', 'line'],
+ 'options': [None, 'Dovecot Cache Hits', 'hits/s', 'cache', 'dovecot.cache', 'line'],
'lines': [
['mail_cache_hits', 'hits', 'incremental']
- ]},
+ ]
+ },
'auth': {
- 'options': [None, "Dovecot Authentications", 'attempts', 'logins', 'dovecot.auth', 'stacked'],
+ 'options': [None, 'Dovecot Authentications', 'attempts', 'logins', 'dovecot.auth', 'stacked'],
'lines': [
['auth_successes', 'ok', 'absolute'],
['auth_failures', 'failed', 'absolute']
- ]},
+ ]
+ },
'auth_cache': {
- 'options': [None, "Dovecot Authentication Cache", 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
+ 'options': [None, 'Dovecot Authentication Cache', 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
'lines': [
['auth_cache_hits', 'hit', 'absolute'],
['auth_cache_misses', 'miss', 'absolute']
- ]}
+ ]
+ }
}
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
- self.request = "EXPORT\tglobal\r\n"
+ self.request = 'EXPORT\tglobal\r\n'
self.host = None # localhost
self.port = None # 24242
# self._keep_alive = True
- self.unix_socket = "/var/run/dovecot/stats"
+ self.unix_socket = '/var/run/dovecot/stats'
self.order = ORDER
self.definitions = CHARTS
@@ -111,7 +132,7 @@ class Service(SocketService):
return None
if raw is None:
- self.debug("dovecot returned no data")
+ self.debug('dovecot returned no data')
return None
data = raw.split('\n')[:2]
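For context on the request string and socket path above: the module speaks the old dovecot stats protocol, sending EXPORT<TAB>global over the unix socket and reading back a tab-separated header line followed by a values line (hence the split('\n')[:2]). A minimal sketch of the same exchange, assuming a local dovecot with its stats socket at the default path used by the module:

    import socket

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect('/var/run/dovecot/stats')   # default path from the module
    sock.sendall(b'EXPORT\tglobal\r\n')
    raw = sock.recv(65536).decode()
    sock.close()

    # First line is tab-separated field names, second line the values.
    header, values = raw.split('\n')[:2]
    print(dict(zip(header.split('\t'), values.split('\t'))))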
diff --git a/python.d/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
index 9c2c58944..3f431f6e0 100644
--- a/python.d/elasticsearch.chart.py
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
@@ -1,11 +1,14 @@
# -*- coding: utf-8 -*-
# Description: elastic search node stats netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+import threading
from collections import namedtuple
-from json import loads
from socket import gethostbyname, gaierror
-from threading import Thread
+
try:
from queue import Queue
except ImportError:
@@ -15,8 +18,6 @@ from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
update_every = 5
-priority = 60000
-retries = 60
METHODS = namedtuple('METHODS', ['get_data', 'url', 'run'])
@@ -63,6 +64,8 @@ NODE_STATS = [
'jvm.buffer_pools.mapped.total_capacity_in_bytes',
'thread_pool.bulk.queue',
'thread_pool.bulk.rejected',
+ 'thread_pool.write.queue',
+ 'thread_pool.write.rejected',
'thread_pool.index.queue',
'thread_pool.index.rejected',
'thread_pool.search.queue',
@@ -107,30 +110,62 @@ HEALTH_STATS = [
]
LATENCY = {
- 'query_latency':
- {'total': 'indices_search_query_total',
- 'spent_time': 'indices_search_query_time_in_millis'},
- 'fetch_latency':
- {'total': 'indices_search_fetch_total',
- 'spent_time': 'indices_search_fetch_time_in_millis'},
- 'indexing_latency':
- {'total': 'indices_indexing_index_total',
- 'spent_time': 'indices_indexing_index_time_in_millis'},
- 'flushing_latency':
- {'total': 'indices_flush_total',
- 'spent_time': 'indices_flush_total_time_in_millis'}
+ 'query_latency': {
+ 'total': 'indices_search_query_total',
+ 'spent_time': 'indices_search_query_time_in_millis'
+ },
+ 'fetch_latency': {
+ 'total': 'indices_search_fetch_total',
+ 'spent_time': 'indices_search_fetch_time_in_millis'
+ },
+ 'indexing_latency': {
+ 'total': 'indices_indexing_index_total',
+ 'spent_time': 'indices_indexing_index_time_in_millis'
+ },
+ 'flushing_latency': {
+ 'total': 'indices_flush_total',
+ 'spent_time': 'indices_flush_total_time_in_millis'
+ }
}
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['search_performance_total', 'search_performance_current', 'search_performance_time',
- 'search_latency', 'index_performance_total', 'index_performance_current', 'index_performance_time',
- 'index_latency', 'index_translog_operations', 'index_translog_size', 'index_segments_count', 'index_segments_memory_writer',
- 'index_segments_memory', 'jvm_mem_heap', 'jvm_mem_heap_bytes', 'jvm_buffer_pool_count',
- 'jvm_direct_buffers_memory', 'jvm_mapped_buffers_memory', 'jvm_gc_count', 'jvm_gc_time', 'host_metrics_file_descriptors',
- 'host_metrics_http', 'host_metrics_transport', 'thread_pool_queued', 'thread_pool_rejected',
- 'fielddata_cache', 'fielddata_evictions_tripped', 'cluster_health_status', 'cluster_health_nodes',
- 'cluster_health_shards', 'cluster_stats_nodes', 'cluster_stats_query_cache', 'cluster_stats_docs',
- 'cluster_stats_store', 'cluster_stats_indices_shards']
+ORDER = [
+ 'search_performance_total',
+ 'search_performance_current',
+ 'search_performance_time',
+ 'search_latency',
+ 'index_performance_total',
+ 'index_performance_current',
+ 'index_performance_time',
+ 'index_latency',
+ 'index_translog_operations',
+ 'index_translog_size',
+ 'index_segments_count',
+ 'index_segments_memory_writer',
+ 'index_segments_memory',
+ 'jvm_mem_heap',
+ 'jvm_mem_heap_bytes',
+ 'jvm_buffer_pool_count',
+ 'jvm_direct_buffers_memory',
+ 'jvm_mapped_buffers_memory',
+ 'jvm_gc_count',
+ 'jvm_gc_time',
+ 'host_metrics_file_descriptors',
+ 'host_metrics_http',
+ 'host_metrics_transport',
+ 'thread_pool_queued',
+ 'thread_pool_rejected',
+ 'fielddata_cache',
+ 'fielddata_evictions_tripped',
+ 'cluster_health_status',
+ 'cluster_health_nodes',
+ 'cluster_health_shards',
+ 'cluster_stats_nodes',
+ 'cluster_stats_query_cache',
+ 'cluster_stats_docs',
+ 'cluster_stats_store',
+ 'cluster_stats_indices_shards',
+]
CHARTS = {
'search_performance_total': {
@@ -139,27 +174,31 @@ CHARTS = {
'lines': [
['indices_search_query_total', 'queries', 'incremental'],
['indices_search_fetch_total', 'fetches', 'incremental']
- ]},
+ ]
+ },
'search_performance_current': {
'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
'elastic.search_performance_current', 'stacked'],
'lines': [
['indices_search_query_current', 'queries', 'absolute'],
['indices_search_fetch_current', 'fetches', 'absolute']
- ]},
+ ]
+ },
'search_performance_time': {
'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
'elastic.search_performance_time', 'stacked'],
'lines': [
['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000],
['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'search_latency': {
'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'],
'lines': [
['query_latency', 'query', 'absolute', 1, 1000],
['fetch_latency', 'fetch', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'index_performance_total': {
'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
'indexing performance', 'elastic.index_performance_total', 'stacked'],
@@ -167,13 +206,15 @@ CHARTS = {
['indices_indexing_index_total', 'indexed', 'incremental'],
['indices_refresh_total', 'refreshes', 'incremental'],
['indices_flush_total', 'flushes', 'incremental']
- ]},
+ ]
+ },
'index_performance_current': {
'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
'indexing performance', 'elastic.index_performance_current', 'stacked'],
'lines': [
['indices_indexing_index_current', 'documents', 'absolute']
- ]},
+ ]
+ },
'index_performance_time': {
'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
'elastic.index_performance_time', 'stacked'],
@@ -181,40 +222,46 @@ CHARTS = {
['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'index_latency': {
'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
'elastic.index_latency', 'stacked'],
'lines': [
['indexing_latency', 'indexing', 'absolute', 1, 1000],
['flushing_latency', 'flushing', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'index_translog_operations': {
'options': [None, 'Translog Operations', 'count', 'translog',
'elastic.index_translog_operations', 'area'],
'lines': [
['indices_translog_operations', 'total', 'absolute'],
            ['indices_translog_uncommitted_operations', 'uncommitted', 'absolute']
- ]},
+ ]
+ },
'index_translog_size': {
'options': [None, 'Translog Size', 'MB', 'translog',
'elastic.index_translog_size', 'area'],
'lines': [
            ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048576],
            ['indices_translog_uncommitted_size_in_bytes', 'uncommitted', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'index_segments_count': {
'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments',
'elastic.index_segments_count', 'line'],
'lines': [
['indices_segments_count', 'segments', 'absolute']
- ]},
+ ]
+ },
'index_segments_memory_writer': {
'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments',
'elastic.index_segments_memory_writer', 'area'],
'lines': [
            ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'index_segments_memory': {
'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments',
'elastic.index_segments_memory', 'stacked'],
@@ -227,84 +274,98 @@ CHARTS = {
            ['indices_segments_doc_values_memory_in_bytes', 'doc values', 'absolute', 1, 1048576],
            ['indices_segments_version_map_memory_in_bytes', 'version map', 'absolute', 1, 1048576],
            ['indices_segments_fixed_bit_set_memory_in_bytes', 'fixed bit set', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'jvm_mem_heap': {
'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc',
'elastic.jvm_heap', 'area'],
'lines': [
['jvm_mem_heap_used_percent', 'inuse', 'absolute']
- ]},
+ ]
+ },
'jvm_mem_heap_bytes': {
'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc',
'elastic.jvm_heap_bytes', 'area'],
'lines': [
            ['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'jvm_buffer_pool_count': {
'options': [None, 'JVM Buffers', 'count', 'memory usage and gc',
'elastic.jvm_buffer_pool_count', 'line'],
'lines': [
['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
['jvm_buffer_pools_mapped_count', 'mapped', 'absolute']
- ]},
+ ]
+ },
'jvm_direct_buffers_memory': {
'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc',
'elastic.jvm_direct_buffers_memory', 'area'],
'lines': [
            ['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048576],
            ['jvm_buffer_pools_direct_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'jvm_mapped_buffers_memory': {
'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc',
'elastic.jvm_mapped_buffers_memory', 'area'],
'lines': [
            ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048576],
            ['jvm_buffer_pools_mapped_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'jvm_gc_count': {
'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
'lines': [
['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
- ]},
+ ]
+ },
'jvm_gc_time': {
'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc',
'elastic.gc_time', 'stacked'],
'lines': [
['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental']
- ]},
+ ]
+ },
'thread_pool_queued': {
'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
'elastic.thread_pool_queued', 'stacked'],
'lines': [
['thread_pool_bulk_queue', 'bulk', 'absolute'],
+ ['thread_pool_write_queue', 'write', 'absolute'],
['thread_pool_index_queue', 'index', 'absolute'],
['thread_pool_search_queue', 'search', 'absolute'],
['thread_pool_merge_queue', 'merge', 'absolute']
- ]},
+ ]
+ },
'thread_pool_rejected': {
'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
'elastic.thread_pool_rejected', 'stacked'],
'lines': [
['thread_pool_bulk_rejected', 'bulk', 'absolute'],
+ ['thread_pool_write_rejected', 'write', 'absolute'],
['thread_pool_index_rejected', 'index', 'absolute'],
['thread_pool_search_rejected', 'search', 'absolute'],
['thread_pool_merge_rejected', 'merge', 'absolute']
- ]},
+ ]
+ },
'fielddata_cache': {
'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
'lines': [
['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'fielddata_evictions_tripped': {
'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
'lines': [
['indices_fielddata_evictions', 'evictions', 'incremental'],
['indices_fielddata_tripped', 'tripped', 'incremental']
- ]},
+ ]
+ },
'cluster_health_nodes': {
'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
'elastic.cluster_health_nodes', 'stacked'],
@@ -313,7 +374,8 @@ CHARTS = {
['number_of_data_nodes', 'data_nodes', 'absolute'],
['number_of_pending_tasks', 'pending_tasks', 'absolute'],
['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
- ]},
+ ]
+ },
'cluster_health_status': {
'options': [None, 'Cluster Status', 'status', 'cluster health API',
'elastic.cluster_health_status', 'area'],
@@ -324,7 +386,8 @@ CHARTS = {
['status_foo2', None, 'absolute'],
['status_foo3', None, 'absolute'],
['status_yellow', 'yellow', 'absolute']
- ]},
+ ]
+ },
'cluster_health_shards': {
'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
'elastic.cluster_health_shards', 'stacked'],
@@ -335,7 +398,8 @@ CHARTS = {
['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
['initializing_shards', 'initializing', 'absolute'],
['active_shards_percent_as_number', 'active_percent', 'absolute']
- ]},
+ ]
+ },
'cluster_stats_nodes': {
'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
'elastic.cluster_nodes', 'stacked'],
@@ -345,52 +409,60 @@ CHARTS = {
['nodes_count_total', 'total', 'absolute'],
['nodes_count_master_only', 'master_only', 'absolute'],
['nodes_count_client', 'client', 'absolute']
- ]},
+ ]
+ },
'cluster_stats_query_cache': {
'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
'elastic.cluster_query_cache', 'stacked'],
'lines': [
['indices_query_cache_hit_count', 'hit', 'incremental'],
['indices_query_cache_miss_count', 'miss', 'incremental']
- ]},
+ ]
+ },
'cluster_stats_docs': {
'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
'elastic.cluster_docs', 'line'],
'lines': [
['indices_docs_count', 'docs', 'absolute']
- ]},
+ ]
+ },
'cluster_stats_store': {
'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
'elastic.cluster_store', 'line'],
'lines': [
            ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'cluster_stats_indices_shards': {
'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
'elastic.cluster_indices_shards', 'stacked'],
'lines': [
['indices_count', 'indices', 'absolute'],
['indices_shards_total', 'shards', 'absolute']
- ]},
+ ]
+ },
'host_metrics_transport': {
'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
'elastic.host_transport', 'area'],
'lines': [
['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
- ]},
+ ]
+ },
'host_metrics_file_descriptors': {
'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
'elastic.host_descriptors', 'area'],
'lines': [
['file_descriptors_used', 'used', 'absolute', 1, 10]
- ]},
+ ]
+ },
'host_metrics_http': {
'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
'elastic.host_http_connections', 'line'],
'lines': [
['http_current_open', 'opened', 'absolute', 1, 1]
- ]}
+ ]
+ }
}
@@ -444,8 +516,8 @@ class Service(UrlService):
for method in self.methods:
if not method.run:
continue
- th = Thread(target=method.get_data,
- args=(queue, method.url))
+ th = threading.Thread(target=method.get_data,
+ args=(queue, method.url))
th.start()
threads.append(th)
@@ -466,7 +538,11 @@ class Service(UrlService):
if not raw_data:
return queue.put(dict())
- data = loads(raw_data)
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
to_netdata = fetch_data_(raw_data=data,
metrics=HEALTH_STATS)
@@ -488,7 +564,11 @@ class Service(UrlService):
if not raw_data:
return queue.put(dict())
- data = loads(raw_data)
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
to_netdata = fetch_data_(raw_data=data,
metrics=CLUSTER_STATS)
@@ -505,7 +585,10 @@ class Service(UrlService):
if not raw_data:
return queue.put(dict())
- data = loads(raw_data)
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
node = list(data['nodes'].keys())[0]
to_netdata = fetch_data_(raw_data=data['nodes'][node],
@@ -525,6 +608,13 @@ class Service(UrlService):
return queue.put(to_netdata)
+ def json_reply(self, reply):
+ try:
+ return json.loads(reply)
+ except ValueError as err:
+ self.error(err)
+ return None
+
def find_avg(self, total, spent_time, key):
if key not in self.latency:
self.latency[key] = dict(total=total,
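The hunks above change the elasticsearch module in two ways: each API endpoint is now polled on its own threading.Thread with results funnelled through a shared queue, and every json.loads call goes through the new json_reply helper so a malformed reply degrades to an empty update instead of killing the collector. A minimal standalone sketch of that pattern, assuming a fetch(url) callable standing in for the framework's _get_raw_data():

import json
import threading
try:
    from queue import Queue  # Python 3
except ImportError:
    from Queue import Queue  # Python 2

def json_reply(reply):
    # mirror of the new helper: swallow bad JSON, let the caller decide
    try:
        return json.loads(reply)
    except ValueError:
        return None

def poll(queue, fetch, url):
    raw = fetch(url)
    data = json_reply(raw) if raw else None
    queue.put(data or dict())

def collect(fetch, urls):
    queue = Queue()
    threads = [threading.Thread(target=poll, args=(queue, fetch, url)) for url in urls]
    for th in threads:
        th.start()
    for th in threads:
        th.join()
    result = dict()
    for _ in urls:
        result.update(queue.get())  # empty dicts from failed endpoints are harmless
    return result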
diff --git a/python.d/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
index ee7ff62fc..85defa4d1 100644
--- a/python.d/example.chart.py
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: example netdata python.d module
-# Author: Pawel Krupa (paulfantom)
+# Author: Put your name here (your github login)
+# SPDX-License-Identifier: GPL-3.0-or-later
from random import SystemRandom
diff --git a/python.d/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
index 2e5b924ba..5431dd46b 100644
--- a/python.d/exim.chart.py
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: exim netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.ExecutableService import ExecutableService
@@ -14,17 +15,18 @@ ORDER = ['qemails']
CHARTS = {
'qemails': {
- 'options': [None, "Exim Queue Emails", "emails", 'queue', 'exim.qemails', 'line'],
+ 'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'],
'lines': [
['emails', None, 'absolute']
- ]}
+ ]
+ }
}
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = "exim -bpc"
+ self.command = 'exim -bpc'
self.order = ORDER
self.definitions = CHARTS
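For reference, this whole collector reduces to one shell call: `exim -bpc` prints the number of queued messages, which feeds the single 'emails' dimension above. A hedged sketch of the same logic outside the ExecutableService framework (assumes exim is installed and on PATH):

from subprocess import PIPE, Popen

def queued_emails():
    # `exim -bpc` prints a single integer: the count of queued messages
    proc = Popen(['exim', '-bpc'], stdout=PIPE, stderr=PIPE)
    out, _ = proc.communicate()
    try:
        return {'emails': int(out.strip())}
    except ValueError:
        return None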
diff --git a/python.d/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
index 3acc58d1a..3126831b7 100644
--- a/python.d/freeradius.chart.py
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: freeradius netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from re import findall
from subprocess import Popen, PIPE
@@ -20,7 +21,7 @@ ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
CHARTS = {
'authentication': {
- 'options': [None, "Authentication", "packets/s", 'Authentication', 'freerad.auth', 'line'],
+ 'options': [None, 'Authentication', 'packets/s', 'Authentication', 'freerad.auth', 'line'],
'lines': [
['access-accepts', None, 'incremental'],
['access-rejects', None, 'incremental'],
@@ -29,9 +30,10 @@ CHARTS = {
['auth-invalid-requests', 'invalid-requests', 'incremental'],
['auth-malformed-requests', 'malformed-requests', 'incremental'],
['auth-unknown-types', 'unknown-types', 'incremental']
- ]},
+ ]
+ },
'accounting': {
- 'options': [None, "Accounting", "packets/s", 'Accounting', 'freerad.acct', 'line'],
+ 'options': [None, 'Accounting', 'packets/s', 'Accounting', 'freerad.acct', 'line'],
'lines': [
['accounting-requests', 'requests', 'incremental'],
['accounting-responses', 'responses', 'incremental'],
@@ -40,9 +42,10 @@ CHARTS = {
['acct-invalid-requests', 'invalid-requests', 'incremental'],
['acct-malformed-requests', 'malformed-requests', 'incremental'],
['acct-unknown-types', 'unknown-types', 'incremental']
- ]},
+ ]
+ },
'proxy-auth': {
- 'options': [None, "Proxy Authentication", "packets/s", 'Authentication', 'freerad.proxy.auth', 'line'],
+ 'options': [None, 'Proxy Authentication', 'packets/s', 'Authentication', 'freerad.proxy.auth', 'line'],
'lines': [
['proxy-access-accepts', 'access-accepts', 'incremental'],
['proxy-access-rejects', 'access-rejects', 'incremental'],
@@ -51,9 +54,10 @@ CHARTS = {
['proxy-auth-invalid-requests', 'invalid-requests', 'incremental'],
['proxy-auth-malformed-requests', 'malformed-requests', 'incremental'],
['proxy-auth-unknown-types', 'unknown-types', 'incremental']
- ]},
+ ]
+ },
'proxy-acct': {
- 'options': [None, "Proxy Accounting", "packets/s", 'Accounting', 'freerad.proxy.acct', 'line'],
+ 'options': [None, 'Proxy Accounting', 'packets/s', 'Accounting', 'freerad.proxy.acct', 'line'],
'lines': [
['proxy-accounting-requests', 'requests', 'incremental'],
['proxy-accounting-responses', 'responses', 'incremental'],
@@ -62,8 +66,8 @@ CHARTS = {
['proxy-acct-invalid-requests', 'invalid-requests', 'incremental'],
['proxy-acct-malformed-requests', 'malformed-requests', 'incremental'],
['proxy-acct-unknown-types', 'unknown-types', 'incremental']
- ]}
-
+ ]
+ }
}
@@ -105,7 +109,7 @@ class Service(SimpleService):
"""
result = self._get_raw_data()
return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
-
+
def _get_raw_data(self):
"""
The following code is equivalent to
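The one functional line in this module is the findall above: radclient's status reply is a block of 'FreeRADIUS-Total-Access-Accepts = 42' style attributes, and the lookbehind keeps only the metric name after the last section prefix. Its behaviour in isolation, on an illustrative (not captured) reply:

from re import findall

sample = ('FreeRADIUS-Total-Access-Accepts = 42\n'
          'FreeRADIUS-Total-Auth-Invalid-Requests = 1\n')
parsed = dict((name.lower(), int(value))
              for name, value in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', sample))
# -> {'access-accepts': 42, 'auth-invalid-requests': 1}, matching the chart line names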
diff --git a/python.d/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
index cbd462570..76e8b72ec 100644
--- a/python.d/go_expvar.chart.py
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: go_expvar netdata python.d module
# Author: Jan Kral (kralewitz)
+# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import division
import json
@@ -20,43 +21,50 @@ MEMSTATS_CHARTS = {
'lines': [
['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_stack': {
'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats',
'expvar.memstats.stack', 'line'],
'lines': [
['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_mspan': {
'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats',
'expvar.memstats.mspan', 'line'],
'lines': [
['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_mcache': {
'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats',
'expvar.memstats.mcache', 'line'],
'lines': [
['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_live_objects': {
'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
'expvar.memstats.live_objects', 'line'],
'lines': [
['memstats_live_objects', 'live']
- ]},
+ ]
+ },
'memstats_sys': {
'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats',
'expvar.memstats.sys', 'line'],
'lines': [
['memstats_sys', 'sys', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_gc_pauses': {
'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
'expvar.memstats.gc_pauses', 'line'],
'lines': [
['memstats_gc_pauses', 'avg']
- ]},
+ ]
+ }
}
MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
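The MEMSTATS_* tables mirror Go's runtime.MemStats as served by the expvar package at /debug/vars; the (1, 1024) pairs in the chart lines above are what turn the byte counters into the kB units named in the options. A hedged sketch of the raw fetch, where the port is an assumption about the monitored app:

import json
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2

def memstats(url='http://localhost:8080/debug/vars'):
    data = json.loads(urlopen(url).read().decode('utf-8'))
    ms = data['memstats']
    return {
        'memstats_heap_alloc': ms['Alloc'],      # bytes; charted as kB via divisor 1024
        'memstats_heap_inuse': ms['HeapInuse'],
        'memstats_sys': ms['Sys'],
    }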
diff --git a/python.d/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
index 3061f5ef2..a46689f50 100644
--- a/python.d/haproxy.chart.py
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: haproxy netdata python.d module
# Author: l2isbad, ktarasz
+# SPDX-License-Identifier: GPL-3.0-or-later
from collections import defaultdict
from re import compile as re_compile
@@ -20,155 +21,185 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want fewer charts, or a different order)
-ORDER = ['fbin', 'fbout', 'fscur', 'fqcur',
- 'fhrsp_1xx', 'fhrsp_2xx', 'fhrsp_3xx', 'fhrsp_4xx', 'fhrsp_5xx', 'fhrsp_other', 'fhrsp_total',
- 'bbin', 'bbout', 'bscur', 'bqcur',
- 'bhrsp_1xx', 'bhrsp_2xx', 'bhrsp_3xx', 'bhrsp_4xx', 'bhrsp_5xx', 'bhrsp_other', 'bhrsp_total',
- 'bqtime', 'bttime', 'brtime', 'bctime',
- 'health_sup', 'health_sdown', 'health_bdown', 'health_idle']
+ORDER = [
+ 'fbin',
+ 'fbout',
+ 'fscur',
+ 'fqcur',
+ 'fhrsp_1xx',
+ 'fhrsp_2xx',
+ 'fhrsp_3xx',
+ 'fhrsp_4xx',
+ 'fhrsp_5xx',
+ 'fhrsp_other',
+ 'fhrsp_total',
+ 'bbin',
+ 'bbout',
+ 'bscur',
+ 'bqcur',
+ 'bhrsp_1xx',
+ 'bhrsp_2xx',
+ 'bhrsp_3xx',
+ 'bhrsp_4xx',
+ 'bhrsp_5xx',
+ 'bhrsp_other',
+ 'bhrsp_total',
+ 'bqtime',
+ 'bttime',
+ 'brtime',
+ 'bctime',
+ 'health_sup',
+ 'health_sdown',
+ 'health_bdown',
+ 'health_idle'
+]
CHARTS = {
'fbin': {
- 'options': [None, "Kilobytes In", "KB/s", 'frontend', 'haproxy_f.bin', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Kilobytes In', 'KB/s', 'frontend', 'haproxy_f.bin', 'line'],
+ 'lines': []
+ },
'fbout': {
- 'options': [None, "Kilobytes Out", "KB/s", 'frontend', 'haproxy_f.bout', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Kilobytes Out', 'KB/s', 'frontend', 'haproxy_f.bout', 'line'],
+ 'lines': []
+ },
'fscur': {
- 'options': [None, "Sessions Active", "sessions", 'frontend', 'haproxy_f.scur', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Sessions Active', 'sessions', 'frontend', 'haproxy_f.scur', 'line'],
+ 'lines': []
+ },
'fqcur': {
- 'options': [None, "Session In Queue", "sessions", 'frontend', 'haproxy_f.qcur', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Session In Queue', 'sessions', 'frontend', 'haproxy_f.qcur', 'line'],
+ 'lines': []
+ },
'fhrsp_1xx': {
- 'options': [None, "HTTP responses with 1xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
+ 'lines': []
+ },
'fhrsp_2xx': {
- 'options': [None, "HTTP responses with 2xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
+ 'lines': []
+ },
'fhrsp_3xx': {
- 'options': [None, "HTTP responses with 3xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
+ 'lines': []
+ },
'fhrsp_4xx': {
- 'options': [None, "HTTP responses with 4xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
+ 'lines': []
+ },
'fhrsp_5xx': {
- 'options': [None, "HTTP responses with 5xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
+ 'lines': []
+ },
'fhrsp_other': {
- 'options': [None, "HTTP responses with other codes (protocol error)", "responses/s", 'frontend', 'haproxy_f.hrsp_other', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'frontend',
+ 'haproxy_f.hrsp_other', 'line'],
+ 'lines': []
+ },
'fhrsp_total': {
- 'options': [None, "HTTP responses", "responses", 'frontend', 'haproxy_f.hrsp_total', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses', 'responses', 'frontend', 'haproxy_f.hrsp_total', 'line'],
+ 'lines': []
+ },
'bbin': {
- 'options': [None, "Kilobytes In", "KB/s", 'backend', 'haproxy_b.bin', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Kilobytes In', 'KB/s', 'backend', 'haproxy_b.bin', 'line'],
+ 'lines': []
+ },
'bbout': {
- 'options': [None, "Kilobytes Out", "KB/s", 'backend', 'haproxy_b.bout', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Kilobytes Out', 'KB/s', 'backend', 'haproxy_b.bout', 'line'],
+ 'lines': []
+ },
'bscur': {
- 'options': [None, "Sessions Active", "sessions", 'backend', 'haproxy_b.scur', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Sessions Active', 'sessions', 'backend', 'haproxy_b.scur', 'line'],
+ 'lines': []
+ },
'bqcur': {
- 'options': [None, "Sessions In Queue", "sessions", 'backend', 'haproxy_b.qcur', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Sessions In Queue', 'sessions', 'backend', 'haproxy_b.qcur', 'line'],
+ 'lines': []
+ },
'bhrsp_1xx': {
- 'options': [None, "HTTP responses with 1xx code", "responses/s", 'backend', 'haproxy_b.hrsp_1xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_1xx', 'line'],
+ 'lines': []
+ },
'bhrsp_2xx': {
- 'options': [None, "HTTP responses with 2xx code", "responses/s", 'backend', 'haproxy_b.hrsp_2xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_2xx', 'line'],
+ 'lines': []
+ },
'bhrsp_3xx': {
- 'options': [None, "HTTP responses with 3xx code", "responses/s", 'backend', 'haproxy_b.hrsp_3xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_3xx', 'line'],
+ 'lines': []
+ },
'bhrsp_4xx': {
- 'options': [None, "HTTP responses with 4xx code", "responses/s", 'backend', 'haproxy_b.hrsp_4xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_4xx', 'line'],
+ 'lines': []
+ },
'bhrsp_5xx': {
- 'options': [None, "HTTP responses with 5xx code", "responses/s", 'backend', 'haproxy_b.hrsp_5xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_5xx', 'line'],
+ 'lines': []
+ },
'bhrsp_other': {
- 'options': [None, "HTTP responses with other codes (protocol error)", "responses/s", 'backend',
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'backend',
'haproxy_b.hrsp_other', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'bhrsp_total': {
- 'options': [None, "HTTP responses (total)", "responses/s", 'backend', 'haproxy_b.hrsp_total', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses (total)', 'responses/s', 'backend', 'haproxy_b.hrsp_total', 'line'],
+ 'lines': []
+ },
'bqtime': {
- 'options': [None, "The average queue time over the 1024 last requests", "ms", 'backend', 'haproxy_b.qtime', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'The average queue time over the 1024 last requests', 'ms', 'backend',
+ 'haproxy_b.qtime', 'line'],
+ 'lines': []
+ },
'bctime': {
- 'options': [None, "The average connect time over the 1024 last requests", "ms", 'backend',
+ 'options': [None, 'The average connect time over the 1024 last requests', 'ms', 'backend',
'haproxy_b.ctime', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'brtime': {
- 'options': [None, "The average response time over the 1024 last requests", "ms", 'backend',
+ 'options': [None, 'The average response time over the 1024 last requests', 'ms', 'backend',
'haproxy_b.rtime', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'bttime': {
- 'options': [None, "The average total session time over the 1024 last requests", "ms", 'backend',
+ 'options': [None, 'The average total session time over the 1024 last requests', 'ms', 'backend',
'haproxy_b.ttime', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'health_sdown': {
- 'options': [None, "Backend Servers In DOWN State", "failed servers", 'health',
+ 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health',
'haproxy_hs.down', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'health_sup': {
- 'options': [None, "Backend Servers In UP State", "health servers", 'health',
+ 'options': [None, 'Backend Servers In UP State', 'health servers', 'health',
'haproxy_hs.up', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'health_bdown': {
- 'options': [None, "Is Backend Alive? 1 = DOWN", "failed backend", 'health', 'haproxy_hb.down', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Is Backend Alive? 1 = DOWN', 'failed backend', 'health', 'haproxy_hb.down', 'line'],
+ 'lines': []
+ },
'health_idle': {
- 'options': [None, "The Ratio Of Polling Time Vs Total Time", "percent", 'health', 'haproxy.idle', 'line'],
+ 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percent', 'health', 'haproxy.idle', 'line'],
'lines': [
['idle', None, 'absolute']
- ]}
+ ]
+ }
}
-METRICS = {'bin': {'algorithm': 'incremental', 'divisor': 1024},
- 'bout': {'algorithm': 'incremental', 'divisor': 1024},
- 'scur': {'algorithm': 'absolute', 'divisor': 1},
- 'qcur': {'algorithm': 'absolute', 'divisor': 1},
- 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1},
- }
+METRICS = {
+ 'bin': {'algorithm': 'incremental', 'divisor': 1024},
+ 'bout': {'algorithm': 'incremental', 'divisor': 1024},
+ 'scur': {'algorithm': 'absolute', 'divisor': 1},
+ 'qcur': {'algorithm': 'absolute', 'divisor': 1},
+ 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
+}
BACKEND_METRICS = {
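Every chart above is declared with 'lines': [] on purpose: the dimensions depend on which frontends and backends the stats socket reports, so they are appended at runtime from the per-metric algorithm/divisor pairs now grouped in METRICS. A sketch of that expansion with illustrative proxy names (the framework's add-line plumbing is simplified away):

METRICS = {
    'bin': {'algorithm': 'incremental', 'divisor': 1024},
    'scur': {'algorithm': 'absolute', 'divisor': 1},
}

def expand(charts, frontends):
    # one dimension per frontend, per metric, built from the METRICS spec
    for metric, spec in METRICS.items():
        for idx, name in enumerate(frontends):
            charts['f' + metric]['lines'].append(
                ['_'.join([metric, str(idx)]), name, spec['algorithm'], 1, spec['divisor']])

charts = {'fbin': {'lines': []}, 'fscur': {'lines': []}}
expand(charts, ['http-in', 'https-in'])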
diff --git a/python.d/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
index b0177ff90..f046f33c0 100644
--- a/python.d/httpcheck.chart.py
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: http check netdata python.d module
# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
import urllib3
import re
@@ -35,12 +36,14 @@ CHARTS = {
'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'],
'lines': [
[HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
- ]},
+ ]
+ },
'response_length': {
'options': [None, 'HTTP response body length', 'characters', 'response', 'httpcheck.responselength', 'line'],
'lines': [
[HTTP_RESPONSE_LENGTH, 'length', 'absolute']
- ]},
+ ]
+ },
'status': {
'options': [None, 'HTTP status', 'boolean', 'status', 'httpcheck.status', 'line'],
'lines': [
@@ -49,7 +52,8 @@ CHARTS = {
[HTTP_BAD_STATUS, 'bad status', 'absolute'],
[HTTP_TIMEOUT, 'timeout', 'absolute'],
[HTTP_NO_CONNECTION, 'no connection', 'absolute']
- ]}
+ ]
+ }
}
@@ -87,15 +91,15 @@ class Service(UrlService):
self.process_response(content, data, status)
except urllib3.exceptions.NewConnectionError as error:
- self.debug("Connection failed: {url}. Error: {error}".format(url=url, error=error))
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
data[HTTP_NO_CONNECTION] = 1
except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError) as error:
- self.debug("Connection timed out: {url}. Error: {error}".format(url=url, error=error))
+ self.debug('Connection timed out: {url}. Error: {error}'.format(url=url, error=error))
data[HTTP_TIMEOUT] = 1
except urllib3.exceptions.HTTPError as error:
- self.debug("Connection failed: {url}. Error: {error}".format(url=url, error=error))
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
data[HTTP_NO_CONNECTION] = 1
except (TypeError, AttributeError) as error:
@@ -109,7 +113,7 @@ class Service(UrlService):
self.debug('Content: \n\n{content}\n'.format(content=content))
if status in self.status_codes_accepted:
if self.regex and self.regex.search(content) is None:
- self.debug("No match for regex '{regex}' found".format(regex=self.regex.pattern))
+ self.debug('No match for regex "{regex}" found'.format(regex=self.regex.pattern))
data[HTTP_BAD_CONTENT] = 1
else:
data[HTTP_SUCCESS] = 1
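The exception ladder above encodes the module's contract: every probe ends with exactly one status dimension set to 1. A reduced sketch with urllib3, where the dimension names shadow the module's HTTP_* constants and retries are disabled so the underlying exception surfaces directly:

import urllib3

def probe(url, timeout=1):
    data = {'success': 0, 'bad_status': 0, 'timeout': 0, 'no_connection': 0}
    http = urllib3.PoolManager(timeout=timeout)
    try:
        status = http.request('GET', url, retries=False).status
        data['success' if status < 400 else 'bad_status'] = 1
    except urllib3.exceptions.NewConnectionError:
        data['no_connection'] = 1
    except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError):
        data['timeout'] = 1
    except urllib3.exceptions.HTTPError:
        data['no_connection'] = 1
    return data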
diff --git a/python.d/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
index 792b99f3f..d8813f9ba 100644
--- a/python.d/icecast.chart.py
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: icecast netdata python.d module
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
@@ -15,10 +16,10 @@ ORDER = ['listeners']
CHARTS = {
'listeners': {
- 'options': [None, 'Number Of Listeners', 'listeners',
- 'listeners', 'icecast.listeners', 'line'],
+ 'options': [None, 'Number Of Listeners', 'listeners', 'listeners', 'icecast.listeners', 'line'],
'lines': [
- ]}
+ ]
+ }
}
@@ -86,7 +87,11 @@ class Service(UrlService):
try:
data = json.loads(raw_data)
except ValueError as error:
- self.error("JSON decode error:", error)
+ self.error('JSON decode error:', error)
return None
- return data['icestats'].get('source')
+ sources = data['icestats'].get('source')
+ if not sources:
+ return None
+
+ return sources if isinstance(sources, list) else [sources]
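The new tail of the parser handles a real Icecast quirk: 'source' is a JSON object when a single mount point is active but a JSON array when there are several, so the module normalises to a list before charting listeners. That guard in isolation, with an inline sample:

import json

def sources(raw):
    stats = json.loads(raw)['icestats'].get('source')
    if not stats:
        return None
    return stats if isinstance(stats, list) else [stats]

print(sources('{"icestats": {"source": {"listeners": 3}}}'))  # [{'listeners': 3}]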
diff --git a/python.d/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
index 43500dfb5..3f6794e48 100644
--- a/python.d/ipfs.chart.py
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: IPFS netdata python.d module
-# Authors: Pawel Krupa (paulfantom), davidak
+# Authors: davidak
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
@@ -26,31 +27,43 @@ CHARTS = {
'bandwidth': {
'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
'lines': [
- ["in", None, "absolute", 8, 1000],
- ["out", None, "absolute", -8, 1000]
- ]},
+ ['in', None, 'absolute', 8, 1000],
+ ['out', None, 'absolute', -8, 1000]
+ ]
+ },
'peers': {
'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
'lines': [
- ["peers", None, 'absolute']
- ]},
+ ['peers', None, 'absolute']
+ ]
+ },
'repo_size': {
'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
'lines': [
- ["avail", None, "absolute", 1, 1e9],
- ["size", None, "absolute", 1, 1e9],
- ]},
+ ['avail', None, 'absolute', 1, 1e9],
+ ['size', None, 'absolute', 1, 1e9],
+ ]
+ },
'repo_objects': {
'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
'lines': [
- ["objects", None, "absolute", 1, 1],
- ["pinned", None, "absolute", 1, 1],
- ["recursive_pins", None, "absolute", 1, 1]
- ]},
+ ['objects', None, 'absolute', 1, 1],
+ ['pinned', None, 'absolute', 1, 1],
+ ['recursive_pins', None, 'absolute', 1, 1]
+ ]
+ }
}
-SI_zeroes = {'k': 3, 'm': 6, 'g': 9, 't': 12,
- 'p': 15, 'e': 18, 'z': 21, 'y': 24}
+SI_zeroes = {
+ 'k': 3,
+ 'm': 6,
+ 'g': 9,
+ 't': 12,
+ 'p': 15,
+ 'e': 18,
+ 'z': 21,
+ 'y': 24
+}
class Service(UrlService):
@@ -60,6 +73,7 @@ class Service(UrlService):
self.order = ORDER
self.definitions = CHARTS
self.__storage_max = None
+ self.do_pinapi = self.configuration.get('pinapi')
def _get_json(self, sub_url):
"""
@@ -73,7 +87,7 @@ class Service(UrlService):
@staticmethod
def _recursive_pins(keys):
- return len([k for k in keys if keys[k]["Type"] == b"recursive"])
+ return sum(1 for k in keys if keys[k]['Type'] == b'recursive')
@staticmethod
def _dehumanize(store_max):
@@ -93,7 +107,7 @@ class Service(UrlService):
def _storagemax(self, store_cfg):
if self.__storage_max is None:
- self.__storage_max = self._dehumanize(store_cfg['StorageMax'])
+ self.__storage_max = self._dehumanize(store_cfg)
return self.__storage_max
def _get_data(self):
@@ -106,13 +120,15 @@ class Service(UrlService):
'/api/v0/stats/bw':
[('in', 'RateIn', int), ('out', 'RateOut', int)],
'/api/v0/swarm/peers':
- [('peers', 'Strings', len)],
+ [('peers', 'Peers', len)],
'/api/v0/stats/repo':
- [('size', 'RepoSize', int), ('objects', 'NumObjects', int)],
- '/api/v0/pin/ls':
- [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)],
- '/api/v0/config/show': [('avail', 'Datastore', self._storagemax)]
+ [('size', 'RepoSize', int), ('objects', 'NumObjects', int), ('avail', 'StorageMax', self._storagemax)],
}
+ if self.do_pinapi:
+ cfg.update({
+ '/api/v0/pin/ls':
+ [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)]
+ })
r = dict()
for suburl in cfg:
in_json = self._get_json(suburl)
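Two behaviour changes ride along with the restyling here: the expensive /api/v0/pin/ls call becomes opt-in via the pinapi configuration key, and _storagemax now receives the StorageMax string (for example '10GB') directly. A hedged re-implementation of the dehumanising step using the SI_zeroes table above, for illustration only:

SI_zeroes = {'k': 3, 'm': 6, 'g': 9, 't': 12, 'p': 15, 'e': 18, 'z': 21, 'y': 24}

def dehumanize(store_max):
    # '10GB' -> 10 * 10**9; plain integers pass straight through
    text = store_max.lower().rstrip('b')
    if text and text[-1] in SI_zeroes:
        return int(float(text[:-1]) * 10 ** SI_zeroes[text[-1]])
    return int(text)

assert dehumanize('10GB') == 10 * 10 ** 9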
diff --git a/python.d/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
index eb6338452..a9f274949 100644
--- a/python.d/isc_dhcpd.chart.py
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: isc dhcpd lease netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
import re
@@ -25,17 +26,18 @@ ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total']
CHARTS = {
'pools_utilization': {
- 'options': [None, 'Pools Utilization', '%', 'utilization',
- 'isc_dhcpd.utilization', 'line'],
- 'lines': []},
+ 'options': [None, 'Pools Utilization', '%', 'utilization', 'isc_dhcpd.utilization', 'line'],
+ 'lines': []
+ },
'pools_active_leases': {
- 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases',
- 'isc_dhcpd.active_leases', 'line'],
- 'lines': []},
+ 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases', 'isc_dhcpd.active_leases', 'line'],
+ 'lines': []
+ },
'leases_total': {
- 'options': [None, 'All Active Leases', 'leases', 'active leases',
- 'isc_dhcpd.leases_total', 'line'],
- 'lines': [['leases_total', 'leases', 'absolute']],
+ 'options': [None, 'All Active Leases', 'leases', 'active leases', 'isc_dhcpd.leases_total', 'line'],
+ 'lines': [
+ ['leases_total', 'leases', 'absolute']
+ ],
'variables': [
['leases_size']
]
diff --git a/python.d/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
index 4f7adfa23..3c310ec69 100644
--- a/python.d/memcached.chart.py
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -1,11 +1,12 @@
# -*- coding: utf-8 -*-
# Description: memcached netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.SocketService import SocketService
# default module values (can be overridden per job in `config`)
-#update_every = 2
+# update_every = 2
priority = 60000
retries = 60
@@ -28,92 +29,106 @@ CHARTS = {
'lines': [
['avail', 'available', 'absolute', 1, 1048576],
['used', 'used', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'net': {
'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
'lines': [
['bytes_read', 'in', 'incremental', 8, 1024],
['bytes_written', 'out', 'incremental', -8, 1024]
- ]},
+ ]
+ },
'connections': {
'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
'lines': [
['curr_connections', 'current', 'incremental'],
['rejected_connections', 'rejected', 'incremental'],
['total_connections', 'total', 'incremental']
- ]},
+ ]
+ },
'items': {
'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
'lines': [
['curr_items', 'current', 'absolute'],
['total_items', 'total', 'absolute']
- ]},
+ ]
+ },
'evicted_reclaimed': {
'options': [None, 'Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
'lines': [
['reclaimed', 'reclaimed', 'absolute'],
['evictions', 'evicted', 'absolute']
- ]},
+ ]
+ },
'get': {
'options': [None, 'Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
'lines': [
['get_hits', 'hits', 'percent-of-absolute-row'],
['get_misses', 'misses', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'get_rate': {
'options': [None, 'Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
'lines': [
['cmd_get', 'rate', 'incremental']
- ]},
+ ]
+ },
'set_rate': {
'options': [None, 'Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
'lines': [
['cmd_set', 'rate', 'incremental']
- ]},
+ ]
+ },
'delete': {
'options': [None, 'Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
'lines': [
['delete_hits', 'hits', 'percent-of-absolute-row'],
['delete_misses', 'misses', 'percent-of-absolute-row'],
- ]},
+ ]
+ },
'cas': {
'options': [None, 'Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
'lines': [
['cas_hits', 'hits', 'percent-of-absolute-row'],
['cas_misses', 'misses', 'percent-of-absolute-row'],
['cas_badval', 'bad value', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'increment': {
'options': [None, 'Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
'lines': [
['incr_hits', 'hits', 'percent-of-absolute-row'],
['incr_misses', 'misses', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'decrement': {
'options': [None, 'Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
'lines': [
['decr_hits', 'hits', 'percent-of-absolute-row'],
['decr_misses', 'misses', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'touch': {
'options': [None, 'Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
'lines': [
['touch_hits', 'hits', 'percent-of-absolute-row'],
['touch_misses', 'misses', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'touch_rate': {
'options': [None, 'Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
'lines': [
['cmd_touch', 'rate', 'incremental']
- ]}
+ ]
+ }
}
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
- self.request = "stats\r\n"
- self.host = "localhost"
+ self.request = 'stats\r\n'
+ self.host = 'localhost'
self.port = 11211
self._keep_alive = True
self.unix_socket = None
@@ -131,13 +146,13 @@ class Service(SocketService):
return None
if response.startswith('ERROR'):
- self.error("received ERROR")
+ self.error('received ERROR')
return None
try:
- parsed = response.split("\n")
+ parsed = response.split('\n')
except AttributeError:
- self.error("response is invalid/empty")
+ self.error('response is invalid/empty')
return None
# split the response
@@ -148,7 +163,7 @@ class Service(SocketService):
t = line[5:].split(' ')
data[t[0]] = t[1]
except (IndexError, ValueError):
- self.debug("invalid line received: " + str(line))
+ self.debug('invalid line received: ' + str(line))
if not data:
self.error("received data doesn't have any records")
@@ -165,10 +180,10 @@ class Service(SocketService):
def _check_raw_data(self, data):
if data.endswith('END\r\n'):
- self.debug("received full response from memcached")
+ self.debug('received full response from memcached')
return True
- self.debug("waiting more data from memcached")
+ self.debug('waiting more data from memcached')
return False
def check(self):
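The parsing touched above follows memcached's text protocol: `stats` returns 'STAT <name> <value>' lines, and the stream is complete once 'END\r\n' arrives, which is what _check_raw_data waits for. The core of _get_data as a standalone sketch:

def parse_stats(response):
    data = dict()
    for line in response.split('\n'):
        line = line.strip()
        if not line.startswith('STAT'):
            continue
        try:
            key, value = line[5:].split(' ')[:2]
            data[key] = value
        except (IndexError, ValueError):
            continue  # the real module logs bad lines via self.debug()
    return data

sample = 'STAT curr_connections 10\r\nSTAT bytes_read 2048\r\nEND\r\n'
# parse_stats(sample) -> {'curr_connections': '10', 'bytes_read': '2048'}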
diff --git a/python.d/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
index 909a419da..10344342d 100644
--- a/python.d/mongodb.chart.py
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: mongodb netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from copy import deepcopy
from datetime import datetime
@@ -31,7 +32,8 @@ REPL_SET_STATES = [
('6', 'unknown'),
('9', 'rollback'),
('10', 'removed'),
- ('0', 'startup')]
+ ('0', 'startup')
+]
def multiply_by_100(value):
@@ -141,12 +143,37 @@ DBSTATS = [
]
# charts order (can be overridden if you want fewer charts, or a different order)
-ORDER = ['read_operations', 'write_operations', 'active_clients', 'journaling_transactions',
- 'journaling_volume', 'background_flush_average', 'background_flush_last', 'background_flush_rate',
- 'wiredtiger_read', 'wiredtiger_write', 'cursors', 'connections', 'memory', 'page_faults',
- 'queued_requests', 'record_moves', 'wiredtiger_cache', 'wiredtiger_pages_evicted', 'asserts',
- 'locks_collection', 'locks_database', 'locks_global', 'locks_metadata', 'locks_oplog',
- 'dbstats_objects', 'tcmalloc_generic', 'tcmalloc_metrics', 'command_total_rate', 'command_failed_rate']
+ORDER = [
+ 'read_operations',
+ 'write_operations',
+ 'active_clients',
+ 'journaling_transactions',
+ 'journaling_volume',
+ 'background_flush_average',
+ 'background_flush_last',
+ 'background_flush_rate',
+ 'wiredtiger_read',
+ 'wiredtiger_write',
+ 'cursors',
+ 'connections',
+ 'memory',
+ 'page_faults',
+ 'queued_requests',
+ 'record_moves',
+ 'wiredtiger_cache',
+ 'wiredtiger_pages_evicted',
+ 'asserts',
+ 'locks_collection',
+ 'locks_database',
+ 'locks_global',
+ 'locks_metadata',
+ 'locks_oplog',
+ 'dbstats_objects',
+ 'tcmalloc_generic',
+ 'tcmalloc_metrics',
+ 'command_total_rate',
+ 'command_failed_rate'
+]
CHARTS = {
'read_operations': {
@@ -155,7 +182,8 @@ CHARTS = {
'lines': [
['query', None, 'incremental'],
['getmore', None, 'incremental']
- ]},
+ ]
+ },
'write_operations': {
'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
'mongodb.write_operations', 'line'],
@@ -163,57 +191,66 @@ CHARTS = {
['insert', None, 'incremental'],
['update', None, 'incremental'],
['delete', None, 'incremental']
- ]},
+ ]
+ },
'active_clients': {
'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
'throughput metrics', 'mongodb.active_clients', 'line'],
'lines': [
['activeClients_readers', 'readers', 'absolute'],
['activeClients_writers', 'writers', 'absolute']
- ]},
+ ]
+ },
'journaling_transactions': {
'options': [None, 'Transactions that have been written to the journal', 'commits',
'database performance', 'mongodb.journaling_transactions', 'line'],
'lines': [
['commits', None, 'absolute']
- ]},
+ ]
+ },
'journaling_volume': {
'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
'mongodb.journaling_volume', 'line'],
'lines': [
['journaledMB', 'volume', 'absolute', 1, 100]
- ]},
+ ]
+ },
'background_flush_average': {
'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
'mongodb.background_flush_average', 'line'],
'lines': [
['average_ms', 'time', 'absolute', 1, 100]
- ]},
+ ]
+ },
'background_flush_last': {
'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
'mongodb.background_flush_last', 'line'],
'lines': [
['last_ms', 'time', 'absolute', 1, 100]
- ]},
+ ]
+ },
'background_flush_rate': {
'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
'lines': [
['flushes', 'flushes', 'incremental', 1, 1]
- ]},
+ ]
+ },
'wiredtiger_read': {
'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
'mongodb.wiredtiger_read', 'stacked'],
'lines': [
['wiredTigerRead_available', 'available', 'absolute', 1, 1],
['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
- ]},
+ ]
+ },
'wiredtiger_write': {
'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
'mongodb.wiredtiger_write', 'stacked'],
'lines': [
['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
- ]},
+ ]
+ },
'cursors': {
        'options': [None, 'Currently opened cursors, cursors with timeout disabled and timed out cursors',
'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
@@ -221,14 +258,16 @@ CHARTS = {
            ['cursor_total', 'opened', 'absolute', 1, 1],
['noTimeout', None, 'absolute', 1, 1],
['timedOut', None, 'incremental', 1, 1]
- ]},
+ ]
+ },
'connections': {
'options': [None, 'Currently connected clients and unused connections', 'connections',
'resource utilization', 'mongodb.connections', 'stacked'],
'lines': [
['connections_available', 'unused', 'absolute', 1, 1],
['connections_current', 'connected', 'absolute', 1, 1]
- ]},
+ ]
+ },
'memory': {
'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
'lines': [
@@ -236,60 +275,70 @@ CHARTS = {
['resident', None, 'absolute', 1, 1],
['nonmapped', None, 'absolute', 1, 1],
['mapped', None, 'absolute', 1, 1]
- ]},
+ ]
+ },
'page_faults': {
'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s',
'resource utilization', 'mongodb.page_faults', 'line'],
'lines': [
['page_faults', None, 'incremental', 1, 1]
- ]},
+ ]
+ },
'queued_requests': {
- 'options': [None, 'Currently queued read and wrire requests', 'requests', 'resource saturation',
+ 'options': [None, 'Currently queued read and write requests', 'requests', 'resource saturation',
'mongodb.queued_requests', 'line'],
'lines': [
['currentQueue_readers', 'readers', 'absolute', 1, 1],
['currentQueue_writers', 'writers', 'absolute', 1, 1]
- ]},
+ ]
+ },
'record_moves': {
'options': [None, 'Number of times documents had to be moved on-disk', 'number',
'resource saturation', 'mongodb.record_moves', 'line'],
'lines': [
['moves', None, 'incremental', 1, 1]
- ]},
+ ]
+ },
'asserts': {
- 'options': [None, 'Number of message, warning, regular, corresponding to errors generated'
- ' by users assertions raised', 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
+ 'options': [
+ None,
+            'Number of message, warning, regular and user assertions raised',
+ 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
'lines': [
['msg', None, 'incremental', 1, 1],
['warning', None, 'incremental', 1, 1],
['regular', None, 'incremental', 1, 1],
['user', None, 'incremental', 1, 1]
- ]},
+ ]
+ },
'wiredtiger_cache': {
'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes',
'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
'lines': [
['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'wiredtiger_pages_evicted': {
'options': [None, 'Pages evicted from the cache',
'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
'lines': [
['unmodified', None, 'absolute', 1, 1],
['modified', None, 'absolute', 1, 1]
- ]},
+ ]
+ },
'dbstats_objects': {
'options': [None, 'Number of documents in the database among all the collections', 'documents',
'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'tcmalloc_generic': {
'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
'lines': [
['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
['heap_size', 'heap_size', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'tcmalloc_metrics': {
'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
'lines': [
@@ -299,7 +348,8 @@ CHARTS = {
['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'command_total_rate': {
'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
'lines': [
@@ -310,7 +360,8 @@ CHARTS = {
['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
['insert_total', 'insert', 'incremental', 1, 1],
['update_total', 'update', 'incremental', 1, 1]
- ]},
+ ]
+ },
'command_failed_rate': {
'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
'lines': [
@@ -321,7 +372,8 @@ CHARTS = {
['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
['insert_failed', 'insert', 'incremental', 1, 1],
['update_failed', 'update', 'incremental', 1, 1]
- ]},
+ ]
+ },
'locks_collection': {
'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
@@ -330,7 +382,8 @@ CHARTS = {
['Collection_W', 'exclusive', 'incremental'],
['Collection_r', 'intent_shared', 'incremental'],
['Collection_w', 'intent_exclusive', 'incremental']
- ]},
+ ]
+ },
'locks_database': {
'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
@@ -339,7 +392,8 @@ CHARTS = {
['Database_W', 'exclusive', 'incremental'],
['Database_r', 'intent_shared', 'incremental'],
['Database_w', 'intent_exclusive', 'incremental']
- ]},
+ ]
+ },
'locks_global': {
'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
@@ -348,21 +402,24 @@ CHARTS = {
['Global_W', 'exclusive', 'incremental'],
['Global_r', 'intent_shared', 'incremental'],
['Global_w', 'intent_exclusive', 'incremental']
- ]},
+ ]
+ },
'locks_metadata': {
'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
'lines': [
['Metadata_R', 'shared', 'incremental'],
['Metadata_w', 'intent_exclusive', 'incremental']
- ]},
+ ]
+ },
'locks_oplog': {
'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
'lines': [
['oplog_r', 'intent_shared', 'incremental'],
['oplog_w', 'intent_exclusive', 'incremental']
- ]}
+ ]
+ }
}
@@ -383,7 +440,7 @@ class Service(SimpleService):
def check(self):
if not PYMONGO:
- self.error('Pymongo module is needed to use mongodb.chart.py')
+ self.error('Pymongo package v2.4+ is needed to use mongodb.chart.py')
return False
self.connection, server_status, error = self._create_connection()
if error:
@@ -491,9 +548,10 @@ class Service(SimpleService):
# Create "heartbeat delay" chart
self.order.append('heartbeat_delay')
self.definitions['heartbeat_delay'] = {
- 'options': [None, 'Time when last heartbeat was received'
- ' from the replica set member (lastHeartbeatRecv)',
- 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
+ 'options': [
+ None,
+ 'Time when last heartbeat was received from the replica set member (lastHeartbeatRecv)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
'lines': create_lines(other_hosts, 'heartbeat_lag')}
# Create "optimedate delay" chart
self.order.append('optimedate_delay')
@@ -561,9 +619,9 @@ class Service(SimpleService):
raw_data['getReplicationInfo'] = dict()
try:
raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
- "$natural", ASCENDING).limit(1)[0]
+ '$natural', ASCENDING).limit(1)[0]
raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
- "$natural", DESCENDING).limit(1)[0]
+ '$natural', DESCENDING).limit(1)[0]
return raw_data
except PyMongoError:
return None
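The quoting change at the bottom touches the oplog window read: the first and last entries of local.oplog.rs in natural order bound how far back replication can recover. That query pair in isolation, as a hedged pymongo sketch (the connection URI is an assumption):

from pymongo import ASCENDING, DESCENDING, MongoClient

def oplog_window(uri='mongodb://localhost:27017'):
    oplog = MongoClient(uri).local.oplog.rs
    first = oplog.find().sort('$natural', ASCENDING).limit(1)[0]
    last = oplog.find().sort('$natural', DESCENDING).limit(1)[0]
    return first['ts'], last['ts']  # Timestamps bounding the oplog window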
diff --git a/python.d/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
index 4c7058b26..c4d1e8b3a 100644
--- a/python.d/mysql.chart.py
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description: MySQL netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.MySQLService import MySQLService
@@ -12,118 +14,127 @@ retries = 60
# query executed on MySQL server
QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
QUERY_SLAVE = 'SHOW SLAVE STATUS;'
+QUERY_VARIABLES = 'SHOW GLOBAL VARIABLES LIKE \'max_connections\';'
GLOBAL_STATS = [
- 'Bytes_received',
- 'Bytes_sent',
- 'Queries',
- 'Questions',
- 'Slow_queries',
- 'Handler_commit',
- 'Handler_delete',
- 'Handler_prepare',
- 'Handler_read_first',
- 'Handler_read_key',
- 'Handler_read_next',
- 'Handler_read_prev',
- 'Handler_read_rnd',
- 'Handler_read_rnd_next',
- 'Handler_rollback',
- 'Handler_savepoint',
- 'Handler_savepoint_rollback',
- 'Handler_update',
- 'Handler_write',
- 'Table_locks_immediate',
- 'Table_locks_waited',
- 'Select_full_join',
- 'Select_full_range_join',
- 'Select_range',
- 'Select_range_check',
- 'Select_scan',
- 'Sort_merge_passes',
- 'Sort_range',
- 'Sort_scan',
- 'Created_tmp_disk_tables',
- 'Created_tmp_files',
- 'Created_tmp_tables',
- 'Connections',
- 'Aborted_connects',
- 'Binlog_cache_disk_use',
- 'Binlog_cache_use',
- 'Threads_connected',
- 'Threads_created',
- 'Threads_cached',
- 'Threads_running',
- 'Thread_cache_misses',
- 'Innodb_data_read',
- 'Innodb_data_written',
- 'Innodb_data_reads',
- 'Innodb_data_writes',
- 'Innodb_data_fsyncs',
- 'Innodb_data_pending_reads',
- 'Innodb_data_pending_writes',
- 'Innodb_data_pending_fsyncs',
- 'Innodb_log_waits',
- 'Innodb_log_write_requests',
- 'Innodb_log_writes',
- 'Innodb_os_log_fsyncs',
- 'Innodb_os_log_pending_fsyncs',
- 'Innodb_os_log_pending_writes',
- 'Innodb_os_log_written',
- 'Innodb_row_lock_current_waits',
- 'Innodb_rows_inserted',
- 'Innodb_rows_read',
- 'Innodb_rows_updated',
- 'Innodb_rows_deleted',
- 'Innodb_buffer_pool_pages_data',
- 'Innodb_buffer_pool_pages_dirty',
- 'Innodb_buffer_pool_pages_free',
- 'Innodb_buffer_pool_pages_flushed',
- 'Innodb_buffer_pool_pages_misc',
- 'Innodb_buffer_pool_pages_total',
- 'Innodb_buffer_pool_bytes_data',
- 'Innodb_buffer_pool_bytes_dirty',
- 'Innodb_buffer_pool_read_ahead',
- 'Innodb_buffer_pool_read_ahead_evicted',
- 'Innodb_buffer_pool_read_ahead_rnd',
- 'Innodb_buffer_pool_read_requests',
- 'Innodb_buffer_pool_write_requests',
- 'Innodb_buffer_pool_reads',
- 'Innodb_buffer_pool_wait_free',
- 'Qcache_hits',
- 'Qcache_lowmem_prunes',
- 'Qcache_inserts',
- 'Qcache_not_cached',
- 'Qcache_queries_in_cache',
- 'Qcache_free_memory',
- 'Qcache_free_blocks',
- 'Qcache_total_blocks',
- 'Key_blocks_unused',
- 'Key_blocks_used',
- 'Key_blocks_not_flushed',
- 'Key_read_requests',
- 'Key_write_requests',
- 'Key_reads',
- 'Key_writes',
- 'Open_files',
- 'Opened_files',
- 'Binlog_stmt_cache_disk_use',
- 'Binlog_stmt_cache_use',
- 'Connection_errors_accept',
- 'Connection_errors_internal',
- 'Connection_errors_max_connections',
- 'Connection_errors_peer_address',
- 'Connection_errors_select',
- 'Connection_errors_tcpwrap',
- 'wsrep_local_recv_queue',
- 'wsrep_local_send_queue',
- 'wsrep_received',
- 'wsrep_replicated',
- 'wsrep_received_bytes',
- 'wsrep_replicated_bytes',
- 'wsrep_local_bf_aborts',
- 'wsrep_local_cert_failures',
- 'wsrep_flow_control_paused_ns']
+ 'Bytes_received',
+ 'Bytes_sent',
+ 'Queries',
+ 'Questions',
+ 'Slow_queries',
+ 'Handler_commit',
+ 'Handler_delete',
+ 'Handler_prepare',
+ 'Handler_read_first',
+ 'Handler_read_key',
+ 'Handler_read_next',
+ 'Handler_read_prev',
+ 'Handler_read_rnd',
+ 'Handler_read_rnd_next',
+ 'Handler_rollback',
+ 'Handler_savepoint',
+ 'Handler_savepoint_rollback',
+ 'Handler_update',
+ 'Handler_write',
+ 'Table_locks_immediate',
+ 'Table_locks_waited',
+ 'Select_full_join',
+ 'Select_full_range_join',
+ 'Select_range',
+ 'Select_range_check',
+ 'Select_scan',
+ 'Sort_merge_passes',
+ 'Sort_range',
+ 'Sort_scan',
+ 'Created_tmp_disk_tables',
+ 'Created_tmp_files',
+ 'Created_tmp_tables',
+ 'Connections',
+ 'Aborted_connects',
+ 'Max_used_connections',
+ 'Binlog_cache_disk_use',
+ 'Binlog_cache_use',
+ 'Threads_connected',
+ 'Threads_created',
+ 'Threads_cached',
+ 'Threads_running',
+ 'Thread_cache_misses',
+ 'Innodb_data_read',
+ 'Innodb_data_written',
+ 'Innodb_data_reads',
+ 'Innodb_data_writes',
+ 'Innodb_data_fsyncs',
+ 'Innodb_data_pending_reads',
+ 'Innodb_data_pending_writes',
+ 'Innodb_data_pending_fsyncs',
+ 'Innodb_log_waits',
+ 'Innodb_log_write_requests',
+ 'Innodb_log_writes',
+ 'Innodb_os_log_fsyncs',
+ 'Innodb_os_log_pending_fsyncs',
+ 'Innodb_os_log_pending_writes',
+ 'Innodb_os_log_written',
+ 'Innodb_row_lock_current_waits',
+ 'Innodb_rows_inserted',
+ 'Innodb_rows_read',
+ 'Innodb_rows_updated',
+ 'Innodb_rows_deleted',
+ 'Innodb_buffer_pool_pages_data',
+ 'Innodb_buffer_pool_pages_dirty',
+ 'Innodb_buffer_pool_pages_free',
+ 'Innodb_buffer_pool_pages_flushed',
+ 'Innodb_buffer_pool_pages_misc',
+ 'Innodb_buffer_pool_pages_total',
+ 'Innodb_buffer_pool_bytes_data',
+ 'Innodb_buffer_pool_bytes_dirty',
+ 'Innodb_buffer_pool_read_ahead',
+ 'Innodb_buffer_pool_read_ahead_evicted',
+ 'Innodb_buffer_pool_read_ahead_rnd',
+ 'Innodb_buffer_pool_read_requests',
+ 'Innodb_buffer_pool_write_requests',
+ 'Innodb_buffer_pool_reads',
+ 'Innodb_buffer_pool_wait_free',
+ 'Qcache_hits',
+ 'Qcache_lowmem_prunes',
+ 'Qcache_inserts',
+ 'Qcache_not_cached',
+ 'Qcache_queries_in_cache',
+ 'Qcache_free_memory',
+ 'Qcache_free_blocks',
+ 'Qcache_total_blocks',
+ 'Key_blocks_unused',
+ 'Key_blocks_used',
+ 'Key_blocks_not_flushed',
+ 'Key_read_requests',
+ 'Key_write_requests',
+ 'Key_reads',
+ 'Key_writes',
+ 'Open_files',
+ 'Opened_files',
+ 'Binlog_stmt_cache_disk_use',
+ 'Binlog_stmt_cache_use',
+ 'Connection_errors_accept',
+ 'Connection_errors_internal',
+ 'Connection_errors_max_connections',
+ 'Connection_errors_peer_address',
+ 'Connection_errors_select',
+ 'Connection_errors_tcpwrap',
+ 'wsrep_local_recv_queue',
+ 'wsrep_local_send_queue',
+ 'wsrep_received',
+ 'wsrep_replicated',
+ 'wsrep_received_bytes',
+ 'wsrep_replicated_bytes',
+ 'wsrep_local_bf_aborts',
+ 'wsrep_local_cert_failures',
+ 'wsrep_flow_control_paused_ns',
+ 'Com_delete',
+ 'Com_insert',
+ 'Com_select',
+ 'Com_update',
+ 'Com_replace'
+]
+
def slave_seconds(value):
try:
@@ -142,22 +153,56 @@ SLAVE_STATS = [
('Slave_IO_Running', slave_running)
]
-ORDER = ['net',
- 'queries',
- 'handlers',
- 'table_locks',
- 'join_issues', 'sort_issues',
- 'tmp',
- 'connections', 'connection_errors',
- 'binlog_cache', 'binlog_stmt_cache',
- 'threads', 'thread_cache_misses',
- 'innodb_io', 'innodb_io_ops', 'innodb_io_pending_ops', 'innodb_log', 'innodb_os_log', 'innodb_os_log_io',
- 'innodb_cur_row_lock', 'innodb_rows', 'innodb_buffer_pool_pages', 'innodb_buffer_pool_bytes',
- 'innodb_buffer_pool_read_ahead', 'innodb_buffer_pool_reqs', 'innodb_buffer_pool_ops',
- 'qcache_ops', 'qcache', 'qcache_freemem', 'qcache_memblocks',
- 'key_blocks', 'key_requests', 'key_disk_ops',
- 'files', 'files_rate', 'slave_behind', 'slave_status',
- 'galera_writesets', 'galera_bytes', 'galera_queue', 'galera_conflicts', 'galera_flow_control']
+VARIABLES = [
+ 'max_connections'
+]
+
+ORDER = [
+ 'net',
+ 'queries',
+ 'queries_type',
+ 'handlers',
+ 'table_locks',
+ 'join_issues',
+ 'sort_issues',
+ 'tmp',
+ 'connections',
+ 'connections_active',
+ 'connection_errors',
+ 'binlog_cache',
+ 'binlog_stmt_cache',
+ 'threads',
+ 'thread_cache_misses',
+ 'innodb_io',
+ 'innodb_io_ops',
+ 'innodb_io_pending_ops',
+ 'innodb_log',
+ 'innodb_os_log',
+ 'innodb_os_log_io',
+ 'innodb_cur_row_lock',
+ 'innodb_rows',
+ 'innodb_buffer_pool_pages',
+ 'innodb_buffer_pool_bytes',
+ 'innodb_buffer_pool_read_ahead',
+ 'innodb_buffer_pool_reqs',
+ 'innodb_buffer_pool_ops',
+ 'qcache_ops',
+ 'qcache',
+ 'qcache_freemem',
+ 'qcache_memblocks',
+ 'key_blocks',
+ 'key_requests',
+ 'key_disk_ops',
+ 'files',
+ 'files_rate',
+ 'slave_behind',
+ 'slave_status',
+ 'galera_writesets',
+ 'galera_bytes',
+ 'galera_queue',
+ 'galera_conflicts',
+ 'galera_flow_control'
+]
CHARTS = {
'net': {
@@ -165,14 +210,27 @@ CHARTS = {
'lines': [
['Bytes_received', 'in', 'incremental', 8, 1024],
['Bytes_sent', 'out', 'incremental', -8, 1024]
- ]},
+ ]
+ },
'queries': {
'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
'lines': [
['Queries', 'queries', 'incremental'],
['Questions', 'questions', 'incremental'],
['Slow_queries', 'slow_queries', 'incremental']
- ]},
+ ]
+ },
+ 'queries_type': {
+ 'options': [None, 'mysql Query type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
+ 'lines': [
+ ['Com_select', 'select', 'incremental'],
+ ['Com_delete', 'delete', 'incremental'],
+ ['Com_update', 'update', 'incremental'],
+ ['Com_insert', 'insert', 'incremental'],
+ ['Qcache_hits', 'cache_hits', 'incremental'],
+ ['Com_replace', 'replace', 'incremental']
+ ]
+ },
'handlers': {
'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
'lines': [
@@ -190,13 +248,15 @@ CHARTS = {
['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'],
['Handler_update', 'update', 'incremental'],
['Handler_write', 'write', 'incremental']
- ]},
+ ]
+ },
'table_locks': {
'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
'lines': [
['Table_locks_immediate', 'immediate', 'incremental'],
['Table_locks_waited', 'waited', 'incremental', -1, 1]
- ]},
+ ]
+ },
'join_issues': {
'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
'lines': [
@@ -205,33 +265,46 @@ CHARTS = {
['Select_range', 'range', 'incremental'],
['Select_range_check', 'range_check', 'incremental'],
['Select_scan', 'scan', 'incremental']
- ]},
+ ]
+ },
'sort_issues': {
'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
'lines': [
['Sort_merge_passes', 'merge_passes', 'incremental'],
['Sort_range', 'range', 'incremental'],
['Sort_scan', 'scan', 'incremental']
- ]},
+ ]
+ },
'tmp': {
'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
'lines': [
['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
['Created_tmp_files', 'files', 'incremental'],
['Created_tmp_tables', 'tables', 'incremental']
- ]},
+ ]
+ },
'connections': {
'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
'lines': [
['Connections', 'all', 'incremental'],
['Aborted_connects', 'aborted', 'incremental']
- ]},
+ ]
+ },
+ 'connections_active': {
+ 'options': [None, 'mysql Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
+ 'lines': [
+ ['Threads_connected', 'active', 'absolute'],
+ ['max_connections', 'limit', 'absolute'],
+ ['Max_used_connections', 'max_active', 'absolute']
+ ]
+ },
'binlog_cache': {
'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
'lines': [
['Binlog_cache_disk_use', 'disk', 'incremental'],
['Binlog_cache_use', 'all', 'incremental']
- ]},
+ ]
+ },
'threads': {
'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'],
'lines': [
@@ -239,25 +312,29 @@ CHARTS = {
['Threads_created', 'created', 'incremental'],
['Threads_cached', 'cached', 'absolute', -1, 1],
['Threads_running', 'running', 'absolute'],
- ]},
+ ]
+ },
'thread_cache_misses': {
'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
'lines': [
['Thread_cache_misses', 'misses', 'absolute', 1, 100]
- ]},
+ ]
+ },
'innodb_io': {
'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'],
'lines': [
['Innodb_data_read', 'read', 'incremental', 1, 1024],
['Innodb_data_written', 'write', 'incremental', -1, 1024]
- ]},
+ ]
+ },
'innodb_io_ops': {
'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
'lines': [
['Innodb_data_reads', 'reads', 'incremental'],
['Innodb_data_writes', 'writes', 'incremental', -1, 1],
['Innodb_data_fsyncs', 'fsyncs', 'incremental']
- ]},
+ ]
+ },
'innodb_io_pending_ops': {
'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb',
'mysql.innodb_io_pending_ops', 'line'],
@@ -265,32 +342,37 @@ CHARTS = {
['Innodb_data_pending_reads', 'reads', 'absolute'],
['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1],
['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute']
- ]},
+ ]
+ },
'innodb_log': {
'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
'lines': [
['Innodb_log_waits', 'waits', 'incremental'],
['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
['Innodb_log_writes', 'writes', 'incremental', -1, 1],
- ]},
+ ]
+ },
'innodb_os_log': {
'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
'lines': [
['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'],
['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1],
- ]},
+ ]
+ },
'innodb_os_log_io': {
'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
'lines': [
['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
- ]},
+ ]
+ },
'innodb_cur_row_lock': {
'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb',
'mysql.innodb_cur_row_lock', 'area'],
'lines': [
['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
- ]},
+ ]
+ },
'innodb_rows': {
'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
'lines': [
@@ -298,7 +380,8 @@ CHARTS = {
['Innodb_rows_read', 'read', 'incremental', 1, 1],
['Innodb_rows_updated', 'updated', 'incremental', 1, 1],
['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1],
- ]},
+ ]
+ },
'innodb_buffer_pool_pages': {
'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb',
'mysql.innodb_buffer_pool_pages', 'line'],
@@ -309,13 +392,15 @@ CHARTS = {
['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1],
['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
['Innodb_buffer_pool_pages_total', 'total', 'absolute']
- ]},
+ ]
+ },
'innodb_buffer_pool_bytes': {
'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
'lines': [
['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
- ]},
+ ]
+ },
'innodb_buffer_pool_read_ahead': {
'options': [None, 'mysql InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb',
'mysql.innodb_buffer_pool_read_ahead', 'area'],
@@ -323,21 +408,24 @@ CHARTS = {
['Innodb_buffer_pool_read_ahead', 'all', 'incremental'],
['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1],
['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental']
- ]},
+ ]
+ },
'innodb_buffer_pool_reqs': {
'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
'mysql.innodb_buffer_pool_reqs', 'area'],
'lines': [
['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1]
- ]},
+ ]
+ },
'innodb_buffer_pool_ops': {
'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
'mysql.innodb_buffer_pool_ops', 'area'],
'lines': [
['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1]
- ]},
+ ]
+ },
'qcache_ops': {
'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
'lines': [
@@ -345,60 +433,70 @@ CHARTS = {
['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
['Qcache_inserts', 'inserts', 'incremental'],
['Qcache_not_cached', 'not cached', 'incremental', -1, 1]
- ]},
+ ]
+ },
'qcache': {
'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
'lines': [
['Qcache_queries_in_cache', 'queries', 'absolute']
- ]},
+ ]
+ },
'qcache_freemem': {
'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'],
'lines': [
['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
- ]},
+ ]
+ },
'qcache_memblocks': {
'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
'lines': [
['Qcache_free_blocks', 'free', 'absolute'],
['Qcache_total_blocks', 'total', 'absolute']
- ]},
+ ]
+ },
'key_blocks': {
'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
'lines': [
['Key_blocks_unused', 'unused', 'absolute'],
['Key_blocks_used', 'used', 'absolute', -1, 1],
['Key_blocks_not_flushed', 'not flushed', 'absolute']
- ]},
+ ]
+ },
'key_requests': {
'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
'lines': [
['Key_read_requests', 'reads', 'incremental'],
['Key_write_requests', 'writes', 'incremental', -1, 1]
- ]},
+ ]
+ },
'key_disk_ops': {
'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s',
'myisam', 'mysql.key_disk_ops', 'area'],
'lines': [
['Key_reads', 'reads', 'incremental'],
['Key_writes', 'writes', 'incremental', -1, 1]
- ]},
+ ]
+ },
'files': {
'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'],
'lines': [
['Open_files', 'files', 'absolute']
- ]},
+ ]
+ },
'files_rate': {
'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
'lines': [
['Opened_files', 'files', 'incremental']
- ]},
+ ]
+ },
'binlog_stmt_cache': {
'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog',
'mysql.binlog_stmt_cache', 'line'],
'lines': [
['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
['Binlog_stmt_cache_use', 'all', 'incremental']
- ]},
+ ]
+ },
'connection_errors': {
'options': [None, 'mysql Connection Errors', 'connections/s', 'connections',
'mysql.connection_errors', 'line'],
@@ -409,47 +507,55 @@ CHARTS = {
['Connection_errors_peer_address', 'peer_addr', 'incremental'],
['Connection_errors_select', 'select', 'incremental'],
['Connection_errors_tcpwrap', 'tcpwrap', 'incremental']
- ]},
+ ]
+ },
'slave_behind': {
'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
'lines': [
['Seconds_Behind_Master', 'seconds', 'absolute']
- ]},
+ ]
+ },
'slave_status': {
'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
'lines': [
['Slave_SQL_Running', 'sql_running', 'absolute'],
['Slave_IO_Running', 'io_running', 'absolute']
- ]},
+ ]
+ },
'galera_writesets': {
'options': [None, 'Replicated writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
'lines': [
['wsrep_received', 'rx', 'incremental'],
['wsrep_replicated', 'tx', 'incremental', -1, 1],
- ]},
+ ]
+ },
'galera_bytes': {
'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'],
'lines': [
['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
- ]},
+ ]
+ },
'galera_queue': {
'options': [None, 'Galera queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
'lines': [
['wsrep_local_recv_queue', 'rx', 'absolute'],
['wsrep_local_send_queue', 'tx', 'absolute', -1, 1],
- ]},
+ ]
+ },
'galera_conflicts': {
'options': [None, 'Replication conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
'lines': [
['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'],
['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1],
- ]},
+ ]
+ },
'galera_flow_control': {
'options': [None, 'Flow control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'],
'lines': [
['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000],
- ]}
+ ]
+ }
}
@@ -458,7 +564,7 @@ class Service(MySQLService):
MySQLService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE)
+ self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE, variables=QUERY_VARIABLES)
def _get_data(self):
@@ -487,5 +593,10 @@ class Service(MySQLService):
else:
self.queries.pop('slave_status')
- return to_netdata or None
+ if 'variables' in raw_data:
+ variables = dict(raw_data['variables'][0])
+ for key in VARIABLES:
+ if key in variables:
+ to_netdata[key] = variables[key]
+ return to_netdata or None
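
Note on the functional part of this change: beyond the reformatting, the module
now issues QUERY_VARIABLES and folds the result into the metric dict so the new
mysql.connections_active chart can plot the configured limit next to live usage.
A minimal, self-contained sketch of that merge, assuming raw_data[query_name][0]
is the cursor.fetchall() result (a tuple of (name, value) rows), which is how
MySQLService appears to expose query output:

    # Sketch only; mirrors the 'variables' branch added to _get_data() above.
    VARIABLES = ['max_connections']

    def merge_variables(raw_data, to_netdata):
        # raw_data['variables'][0] is assumed to be the fetchall() rows.
        if 'variables' in raw_data:
            variables = dict(raw_data['variables'][0])
            for key in VARIABLES:
                if key in variables:
                    to_netdata[key] = variables[key]
        return to_netdata

    # A server reporting max_connections=151 feeds the 'limit' dimension.
    raw = {'variables': ((('max_connections', '151'),),)}
    print(merge_variables(raw, {'Threads_connected': '3'}))
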
diff --git a/python.d/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
index 2e4f0d1b5..09c6bbd37 100644
--- a/python.d/nginx.chart.py
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: nginx netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.UrlService import UrlService
@@ -25,28 +26,32 @@ CHARTS = {
'options': [None, 'nginx Active Connections', 'connections', 'active connections',
'nginx.connections', 'line'],
'lines': [
- ["active"]
- ]},
+ ['active']
+ ]
+ },
'requests': {
'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
'lines': [
- ["requests", None, 'incremental']
- ]},
+ ['requests', None, 'incremental']
+ ]
+ },
'connection_status': {
'options': [None, 'nginx Active Connections by Status', 'connections', 'status',
'nginx.connection_status', 'line'],
'lines': [
- ["reading"],
- ["writing"],
- ["waiting", "idle"]
- ]},
+ ['reading'],
+ ['writing'],
+ ['waiting', 'idle']
+ ]
+ },
'connect_rate': {
'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate',
'nginx.connect_rate', 'line'],
'lines': [
- ["accepts", "accepted", "incremental"],
- ["handled", None, "incremental"]
- ]}
+ ['accepts', 'accepted', 'incremental'],
+ ['handled', None, 'incremental']
+ ]
+ }
}
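
nginx.chart.py only swaps double quotes for single quotes, but it is a handy
place to spell out the 'lines' convention used throughout these modules: each
entry is [id, name, algorithm, multiplier, divisor], and everything after id is
optional (name defaults to the id, algorithm to 'absolute', multiplier and
divisor to 1). An illustrative helper, not part of the plugin:

    # Expands a python.d 'lines' entry into its named fields.
    def describe_line(line):
        fields = ('id', 'name', 'algorithm', 'multiplier', 'divisor')
        defaults = (None, None, 'absolute', 1, 1)
        return dict(zip(fields, tuple(line) + defaults[len(line):]))

    print(describe_line(['requests', None, 'incremental']))
    # {'id': 'requests', 'name': None, 'algorithm': 'incremental',
    #  'multiplier': 1, 'divisor': 1}
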
diff --git a/python.d/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
index 509ddd380..1392f5a56 100644
--- a/python.d/nginx_plus.chart.py
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: nginx_plus netdata python.d module
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import re
@@ -21,63 +22,71 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['requests_total', 'requests_current',
- 'connections_statistics', 'connections_workers',
- 'ssl_handshakes', 'ssl_session_reuses', 'ssl_memory_usage',
- 'processes']
+ORDER = [
+ 'requests_total',
+ 'requests_current',
+ 'connections_statistics',
+ 'connections_workers',
+ 'ssl_handshakes',
+ 'ssl_session_reuses',
+ 'ssl_memory_usage',
+ 'processes'
+]
CHARTS = {
'requests_total': {
- 'options': [None, 'Requests Total', 'requests/s',
- 'requests', 'nginx_plus.requests_total', 'line'],
+ 'options': [None, 'Requests Total', 'requests/s', 'requests', 'nginx_plus.requests_total', 'line'],
'lines': [
['requests_total', 'total', 'incremental']
- ]},
+ ]
+ },
'requests_current': {
- 'options': [None, 'Requests Current', 'requests',
- 'requests', 'nginx_plus.requests_current', 'line'],
+ 'options': [None, 'Requests Current', 'requests', 'requests', 'nginx_plus.requests_current', 'line'],
'lines': [
['requests_current', 'current']
- ]},
+ ]
+ },
'connections_statistics': {
'options': [None, 'Connections Statistics', 'connections/s',
'connections', 'nginx_plus.connections_statistics', 'stacked'],
'lines': [
['connections_accepted', 'accepted', 'incremental'],
['connections_dropped', 'dropped', 'incremental']
- ]},
+ ]
+ },
'connections_workers': {
'options': [None, 'Workers Statistics', 'workers',
'connections', 'nginx_plus.connections_workers', 'stacked'],
'lines': [
['connections_idle', 'idle'],
['connections_active', 'active']
- ]},
+ ]
+ },
'ssl_handshakes': {
- 'options': [None, 'SSL Handshakes', 'handshakes/s',
- 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
+ 'options': [None, 'SSL Handshakes', 'handshakes/s', 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
'lines': [
['ssl_handshakes', 'successful', 'incremental'],
['ssl_handshakes_failed', 'failed', 'incremental']
- ]},
+ ]
+ },
'ssl_session_reuses': {
- 'options': [None, 'Session Reuses', 'sessions/s',
- 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
+ 'options': [None, 'Session Reuses', 'sessions/s', 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
'lines': [
['ssl_session_reuses', 'reused', 'incremental']
- ]},
+ ]
+ },
'ssl_memory_usage': {
- 'options': [None, 'Memory Usage', '%',
- 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', '%', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
'lines': [
['ssl_memory_usage', 'usage', 'absolute', 1, 100]
- ]},
+ ]
+ },
'processes': {
- 'options': [None, 'Processes', 'processes',
- 'processes', 'nginx_plus.processes', 'line'],
+ 'options': [None, 'Processes', 'processes', 'processes', 'nginx_plus.processes', 'line'],
'lines': [
['processes_respawned', 'respawned']
- ]}
+ ]
+ }
}
@@ -86,17 +95,15 @@ def cache_charts(cache):
charts = OrderedDict()
charts['{0}_traffic'.format(cache.name)] = {
- 'options': [None, 'Traffic', 'KB', family,
- 'nginx_plus.cache_traffic', 'stacked'],
+ 'options': [None, 'Traffic', 'KB', family, 'nginx_plus.cache_traffic', 'stacked'],
'lines': [
['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
['_'.join([cache.name, 'miss_bytes']), 'bypass', 'absolute', 1, 1024]
- ]
+ ]
}
charts['{0}_memory_usage'.format(cache.name)] = {
- 'options': [None, 'Memory Usage', '%', family,
- 'nginx_plus.cache_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.cache_memory_usage', 'area'],
'lines': [
['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
]
@@ -160,8 +167,7 @@ def web_upstream_charts(wu):
# Requests
charts['web_upstream_{name}_requests'.format(name=wu.name)] = {
- 'options': [None, 'Peers Requests', 'requests/s', family,
- 'nginx_plus.web_upstream_requests', 'line'],
+ 'options': [None, 'Peers Requests', 'requests/s', family, 'nginx_plus.web_upstream_requests', 'line'],
'lines': dimensions('requests', 'incremental')
}
# Responses Codes
@@ -177,7 +183,7 @@ def web_upstream_charts(wu):
]
}
for peer in wu:
- charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.id)] = {
+ charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.server)] = {
'options': [None, 'Peer "{0}" Responses'.format(peer.real_server), 'responses/s', family,
'nginx_plus.web_upstream_peer_responses', 'stacked'],
'lines': [
@@ -190,26 +196,23 @@ def web_upstream_charts(wu):
}
# Connections
charts['web_upstream_{name}_connections'.format(name=wu.name)] = {
- 'options': [None, 'Peers Connections', 'active', family,
- 'nginx_plus.web_upstream_connections', 'line'],
+ 'options': [None, 'Peers Connections', 'active', family, 'nginx_plus.web_upstream_connections', 'line'],
'lines': dimensions('active')
}
charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
- 'options': [None, 'Peers Connections Usage', '%', family,
- 'nginx_plus.web_upstream_connections_usage', 'line'],
+ 'options': [None, 'Peers Connections Usage', '%', family, 'nginx_plus.web_upstream_connections_usage', 'line'],
'lines': dimensions('connections_usage', d=100)
}
# Traffic
charts['web_upstream_{0}_all_net'.format(wu.name)] = {
- 'options': [None, 'All Peers Traffic', 'kilobits/s', family,
- 'nginx_plus.web_upstream_all_net', 'area'],
+ 'options': [None, 'All Peers Traffic', 'kilobits/s', family, 'nginx_plus.web_upstream_all_net', 'area'],
'lines': [
['{0}_received'.format(wu.name), 'received', 'incremental', 1, 1000],
['{0}_sent'.format(wu.name), 'sent', 'incremental', -1, 1000]
]
}
for peer in wu:
- charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.id)] = {
+ charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.server)] = {
'options': [None, 'Peer "{0}" Traffic'.format(peer.real_server), 'kilobits/s', family,
'nginx_plus.web_upstream_peer_traffic', 'area'],
'lines': [
@@ -219,7 +222,7 @@ def web_upstream_charts(wu):
}
# Response Time
for peer in wu:
- charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.id)] = {
+ charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family,
'nginx_plus.web_upstream_peer_timings', 'line'],
'lines': [
@@ -229,30 +232,27 @@ def web_upstream_charts(wu):
}
# Memory Usage
charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
- 'options': [None, 'Memory Usage', '%', family,
- 'nginx_plus.web_upstream_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
'lines': [
['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
]
}
# State
charts['web_upstream_{name}_status'.format(name=wu.name)] = {
- 'options': [None, 'Peers Status', 'state', family,
- 'nginx_plus.web_upstream_status', 'line'],
+ 'options': [None, 'Peers Status', 'state', family, 'nginx_plus.web_upstream_status', 'line'],
'lines': dimensions('state')
}
# Downtime
charts['web_upstream_{name}_downtime'.format(name=wu.name)] = {
- 'options': [None, 'Peers Downtime', 'seconds', family,
- 'nginx_plus.web_upstream_peer_downtime', 'line'],
+ 'options': [None, 'Peers Downtime', 'seconds', family, 'nginx_plus.web_upstream_peer_downtime', 'line'],
'lines': dimensions('downtime', d=1000)
}
return charts
-METRICS = dict(
- SERVER=[
+METRICS = {
+ 'SERVER': [
'processes.respawned',
'connections.accepted',
'connections.dropped',
@@ -266,7 +266,7 @@ METRICS = dict(
'slabs.SSL.pages.free',
'slabs.SSL.pages.used'
],
- WEB_ZONE=[
+ 'WEB_ZONE': [
'processing',
'requests',
'responses.1xx',
@@ -278,7 +278,7 @@ METRICS = dict(
'received',
'sent'
],
- WEB_UPSTREAM_PEER=[
+ 'WEB_UPSTREAM_PEER': [
'id',
'server',
'name',
@@ -297,7 +297,7 @@ METRICS = dict(
'received',
'downtime'
],
- WEB_UPSTREAM_SUMMARY=[
+ 'WEB_UPSTREAM_SUMMARY': [
'responses.1xx',
'responses.2xx',
'responses.3xx',
@@ -306,13 +306,13 @@ METRICS = dict(
'sent',
'received'
],
- CACHE=[
+ 'CACHE': [
'hit.bytes', # served
'miss.bytes_written', # written
'miss.bytes' # bypass
]
-)
+}
BAD_SYMBOLS = re.compile(r'[:/.-]+')
@@ -373,6 +373,7 @@ class WebUpstream:
return peer
def peers_stats(self, peers):
+ peers = {int(peer['id']): peer for peer in peers}
data = dict()
for peer in self.peers.values():
if not peer.active:
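
Besides quoting and layout, nginx_plus.chart.py changes behaviour in two spots:
per-peer chart ids are now built from peer.server rather than peer.id, and
peers_stats() re-keys the incoming peer list by integer id before reading
stats, so lookups no longer depend on the order the status API returns peers
in. A toy reproduction of the re-keying (the peer dicts are invented and carry
only the fields used here):

    # Mirrors the added line: key the API's peer list by numeric id.
    def index_peers(peers):
        return {int(peer['id']): peer for peer in peers}

    api_peers = [{'id': '1', 'server': '10.0.0.2:80'},
                 {'id': '0', 'server': '10.0.0.1:80'}]
    by_id = index_peers(api_peers)
    print(by_id[0]['server'])  # '10.0.0.1:80', whatever order the API used
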
diff --git a/python.d/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
index 499dfda2e..d713f46bd 100644
--- a/python.d/nsd.chart.py
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: NSD `nsd-control stats_noreset` netdata python.d module
# Author: <383c57 at gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
import re
@@ -16,27 +17,29 @@ ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
CHARTS = {
'queries': {
- 'options': [
- None, "queries", 'queries/s', 'queries', 'nsd.queries', 'line'],
+ 'options': [None, 'queries', 'queries/s', 'queries', 'nsd.queries', 'line'],
'lines': [
- ['num_queries', 'queries', 'incremental'],]},
+ ['num_queries', 'queries', 'incremental']
+ ]
+ },
'zones': {
- 'options': [
- None, "zones", 'zones', 'zones', 'nsd.zones', 'stacked'],
+ 'options': [None, 'zones', 'zones', 'zones', 'nsd.zones', 'stacked'],
'lines': [
['zone_master', 'master', 'absolute'],
- ['zone_slave', 'slave', 'absolute'],]},
+ ['zone_slave', 'slave', 'absolute']
+ ]
+ },
'protocol': {
- 'options': [
- None, "protocol", 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
+ 'options': [None, 'protocol', 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
'lines': [
['num_udp', 'udp', 'incremental'],
['num_udp6', 'udp6', 'incremental'],
['num_tcp', 'tcp', 'incremental'],
- ['num_tcp6', 'tcp6', 'incremental'],]},
+ ['num_tcp6', 'tcp6', 'incremental']
+ ]
+ },
'type': {
- 'options': [
- None, "query type", 'queries/s', 'query type', 'nsd.type', 'stacked'],
+ 'options': [None, 'query type', 'queries/s', 'query type', 'nsd.type', 'stacked'],
'lines': [
['num_type_A', 'A', 'incremental'],
['num_type_NS', 'NS', 'incremental'],
@@ -49,16 +52,18 @@ CHARTS = {
['num_type_TXT', 'TXT', 'incremental'],
['num_type_AAAA', 'AAAA', 'incremental'],
['num_type_SRV', 'SRV', 'incremental'],
- ['num_type_TYPE255', 'ANY', 'incremental'],]},
+ ['num_type_TYPE255', 'ANY', 'incremental']
+ ]
+ },
'transfer': {
- 'options': [
- None, "transfer", 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
+ 'options': [None, 'transfer', 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
'lines': [
['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
- ['num_type_TYPE252', 'AXFR', 'incremental'],]},
+ ['num_type_TYPE252', 'AXFR', 'incremental']
+ ]
+ },
'rcode': {
- 'options': [
- None, "return code", 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
+ 'options': [None, 'return code', 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
'lines': [
['num_rcode_NOERROR', 'NOERROR', 'incremental'],
['num_rcode_FORMERR', 'FORMERR', 'incremental'],
@@ -66,7 +71,9 @@ CHARTS = {
['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
['num_rcode_REFUSED', 'REFUSED', 'incremental'],
- ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental'],]}
+ ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental']
+ ]
+ }
}
@@ -74,7 +81,7 @@ class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(
self, configuration=configuration, name=name)
- self.command = "nsd-control stats_noreset"
+ self.command = 'nsd-control stats_noreset'
self.order = ORDER
self.definitions = CHARTS
self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
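
The nsd module's parsing is untouched, but the regex on the last line above is
the whole protocol: nsd-control prints one key=value pair per line. A quick
demonstration against invented sample output; the dot-to-underscore
normalisation is an assumption inferred from the chart dimension ids above
(num_queries, zone_master):

    import re

    regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
    sample = 'num.queries=184\nzone.master=3\nzone.slave=1'
    data = {k.replace('.', '_'): int(v) for k, v in regex.findall(sample)}
    print(data)  # {'num_queries': 184, 'zone_master': 3, 'zone_slave': 1}
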
diff --git a/python.d/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
index 05209da87..79d557c80 100644
--- a/python.d/ntpd.chart.py
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -2,6 +2,7 @@
# Description: ntpd netdata python.d module
# Author: Sven Mäder (rda0)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import struct
import re
@@ -56,108 +57,117 @@ CHARTS = {
'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'],
'lines': [
['offset', 'offset', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_jitter': {
'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'],
'lines': [
['sys_jitter', 'system', 'absolute', 1, PRECISION],
['clk_jitter', 'clock', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_frequency': {
'options': [None, 'Frequency offset relative to hardware clock', 'ppm', 'system', 'ntpd.sys_frequency', 'area'],
'lines': [
['frequency', 'frequency', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_wander': {
'options': [None, 'Clock frequency wander', 'ppm', 'system', 'ntpd.sys_wander', 'area'],
'lines': [
['clk_wander', 'clock', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_rootdelay': {
'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system',
'ntpd.sys_rootdelay', 'area'],
'lines': [
['rootdelay', 'delay', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_rootdisp': {
'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system',
'ntpd.sys_rootdisp', 'area'],
'lines': [
['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_stratum': {
'options': [None, 'Stratum (1-15)', 'stratum', 'system', 'ntpd.sys_stratum', 'line'],
'lines': [
['stratum', 'stratum', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_tc': {
'options': [None, 'Time constant and poll exponent (3-17)', 'log2 s', 'system', 'ntpd.sys_tc', 'line'],
'lines': [
['tc', 'current', 'absolute', 1, PRECISION],
['mintc', 'minimum', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_precision': {
'options': [None, 'Precision', 'log2 s', 'system', 'ntpd.sys_precision', 'line'],
'lines': [
['precision', 'precision', 'absolute', 1, PRECISION]
- ]}
+ ]
+ }
}
PEER_CHARTS = {
'peer_offset': {
'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_delay': {
'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_dispersion': {
'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_jitter': {
'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_xleave': {
'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_rootdelay': {
'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers',
'ntpd.peer_rootdelay', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_rootdisp': {
'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'peers',
'ntpd.peer_rootdisp', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_stratum': {
'options': [None, 'Stratum (1-15)', 'stratum', 'peers', 'ntpd.peer_stratum', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_hmode': {
'options': [None, 'Host mode (1-6)', 'hmode', 'peers', 'ntpd.peer_hmode', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_pmode': {
'options': [None, 'Peer mode (1-5)', 'pmode', 'peers', 'ntpd.peer_pmode', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_hpoll': {
'options': [None, 'Host poll exponent', 'log2 s', 'peers', 'ntpd.peer_hpoll', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_ppoll': {
'options': [None, 'Peer poll exponent', 'log2 s', 'peers', 'ntpd.peer_ppoll', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_precision': {
'options': [None, 'Precision', 'log2 s', 'peers', 'ntpd.peer_precision', 'line'],
- 'lines': [
- ]}
+ 'lines': []
+ }
}
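
All PEER_CHARTS now declare 'lines': [] on a single line; the lists are empty
on purpose and receive one dimension per peer discovered at runtime. A hedged
sketch of that population step, in which the '<peer>_offset' dimension id and
the PRECISION value are illustrative assumptions rather than the module's
exact scheme:

    from copy import deepcopy

    PRECISION = 1000000  # assumed value; the module defines its own PRECISION

    def add_peer_dimension(charts, peer_name):
        charts = deepcopy(charts)
        charts['peer_offset']['lines'].append(
            ['{0}_offset'.format(peer_name), peer_name, 'absolute', 1, PRECISION])
        return charts

    base = {'peer_offset': {'options': [None, 'Filter offset', 'ms', 'peers',
                                        'ntpd.peer_offset', 'line'],
                            'lines': []}}
    print(add_peer_dimension(base, 'ntp1')['peer_offset']['lines'])
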
diff --git a/python.d/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
index 519c77fa3..64d7062d9 100644
--- a/python.d/ovpn_status_log.chart.py
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: openvpn status log netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from re import compile as r_compile
@@ -16,15 +17,19 @@ CHARTS = {
'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
'lines': [
['users', None, 'absolute'],
- ]},
+ ]
+ },
'traffic': {
'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
'lines': [
['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
- ]},
-
+ ]
+ }
}
+TLS_REGEX = r_compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
+STATIC_KEY_REGEX = r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)')
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
@@ -32,8 +37,10 @@ class Service(SimpleService):
self.order = ORDER
self.definitions = CHARTS
self.log_path = self.configuration.get('log_path')
- self.regex = dict(tls=r_compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d+)? (?P<bytes_in>\d+) (?P<bytes_out>\d+)'),
- static_key=r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'))
+ self.regex = {
+ 'tls': TLS_REGEX,
+ 'static_key': STATIC_KEY_REGEX
+ }
def check(self):
if not (self.log_path and isinstance(self.log_path, str)):
@@ -57,7 +64,7 @@ class Service(SimpleService):
break
if found:
return True
- self.error("Failed to parse ovpenvpn log file")
+        self.error('Failed to parse openvpn log file')
return False
def _get_raw_data(self):
@@ -107,8 +114,12 @@ class Service(SimpleService):
data = dict(users=0, bytes_in=0, bytes_out=0)
for row in raw_data:
- row = ' '.join(row.split(',')) if ',' in row else ' '.join(row.split())
- match = self.regex['tls'].search(row)
+ columns = row.split(',') if ',' in row else row.split()
+ if 'UNDEF' in columns:
+ # see https://openvpn.net/archive/openvpn-users/2004-08/msg00116.html
+ continue
+
+ match = self.regex['tls'].search(' '.join(columns))
if match:
match = match.groupdict()
data['users'] += 1
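
Two functional fixes ride along with the reformatting here: the TLS pattern,
now the module-level TLS_REGEX, also accepts IPv6 client addresses, and rows
whose common name is UNDEF (unauthenticated clients, per the linked
openvpn-users post) are skipped before counting users. Checking the widened
pattern against both address families (sample rows invented):

    from re import compile as r_compile

    TLS_REGEX = r_compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?))'
                          r' (?P<bytes_in>\d+) (?P<bytes_out>\d+)')

    for row in ('10.0.0.5:49201 11811160064 12880',  # IPv4:port
                '2001:db8::1 4096 8192'):            # IPv6
        print(TLS_REGEX.search(row).groupdict())
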
diff --git a/python.d/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
index ea7a9a7e6..a3f0963fc 100644
--- a/python.d/phpfpm.chart.py
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description: PHP-FPM netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
import re
@@ -58,18 +60,21 @@ CHARTS = {
['active'],
['maxActive', 'max active'],
['idle']
- ]},
+ ]
+ },
'requests': {
'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
'lines': [
['requests', None, 'incremental']
- ]},
+ ]
+ },
'performance': {
'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
'lines': [
['reached', 'max children reached'],
['slow', 'slow requests']
- ]},
+ ]
+ },
'request_duration': {
'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration',
'line'],
@@ -77,21 +82,24 @@ CHARTS = {
['minReqDur', 'min', 'absolute', 1, 1000],
['maxReqDur', 'max', 'absolute', 1, 1000],
['avgReqDur', 'avg', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'request_cpu': {
'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
'lines': [
['minReqCpu', 'min'],
['maxReqCpu', 'max'],
['avgReqCpu', 'avg']
- ]},
+ ]
+ },
'request_mem': {
'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
'lines': [
['minReqMem', 'min', 'absolute', 1, 1024],
['maxReqMem', 'max', 'absolute', 1, 1024],
['avgReqMem', 'avg', 'absolute', 1, 1024]
- ]}
+ ]
+ }
}
diff --git a/python.d/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
index 0a312210d..e86f82544 100644
--- a/python.d/portcheck.chart.py
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: simple port check netdata python.d module
# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
import socket
@@ -36,7 +37,8 @@ CHARTS = {
[PORT_SUCCESS, 'success', 'absolute'],
[PORT_TIMEOUT, 'timeout', 'absolute'],
[PORT_FAILED, 'no connection', 'absolute']
- ]}
+ ]
+ }
}
@@ -56,13 +58,13 @@ class Service(SimpleService):
:return: boolean
"""
if self.host is None or self.port is None:
- self.error("Host or port missing")
+ self.error('Host or port missing')
return False
if not isinstance(self.port, int):
self.error('"port" is not an integer. Specify a numerical value, not service name.')
return False
- self.debug("Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s".format(
+ self.debug('Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s'.format(
host=self.host, port=self.port, update=self.update_every, timeout=self.timeout
))
# We will accept any (valid-ish) configuration, even if initial connection fails (a service might be down from
@@ -101,7 +103,7 @@ class Service(SimpleService):
return data
def _create_socket(self, socket_config):
- af, sock_type, proto, canon_name, sa = socket_config
+ af, sock_type, proto, _, sa = socket_config
try:
self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
sock = socket.socket(af, sock_type, proto)
@@ -119,7 +121,7 @@ class Service(SimpleService):
:return: dict
"""
- af, sock_type, proto, canon_name, sa = socket_config
+ af, _, proto, _, sa = socket_config
port = str(sa[1])
try:
self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port))
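
The portcheck edits replace unused unpacked names with '_'. Each socket_config
is a 5-tuple straight from socket.getaddrinfo(), so only the positions matter;
a short demonstration of the shape ('localhost' and 80 are placeholders):

    import socket

    # getaddrinfo() yields (family, type, proto, canonname, sockaddr) tuples;
    # portcheck needs family, type, proto and the sockaddr, never canonname.
    for af, sock_type, proto, _, sa in socket.getaddrinfo(
            'localhost', 80, 0, socket.SOCK_STREAM):
        print(af, sock_type, proto, sa[0], sa[1])
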
diff --git a/python.d/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
index a2129e4be..bdbd0feea 100644
--- a/python.d/postfix.chart.py
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: postfix netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.ExecutableService import ExecutableService
@@ -14,22 +15,24 @@ ORDER = ['qemails', 'qsize']
CHARTS = {
'qemails': {
- 'options': [None, "Postfix Queue Emails", "emails", 'queue', 'postfix.qemails', 'line'],
+ 'options': [None, 'Postfix Queue Emails', 'emails', 'queue', 'postfix.qemails', 'line'],
'lines': [
['emails', None, 'absolute']
- ]},
+ ]
+ },
'qsize': {
- 'options': [None, "Postfix Queue Emails Size", "emails size in KB", 'queue', 'postfix.qsize', 'area'],
+ 'options': [None, 'Postfix Queue Emails Size', 'emails size in KB', 'queue', 'postfix.qsize', 'area'],
'lines': [
- ["size", None, 'absolute']
- ]}
+ ['size', None, 'absolute']
+ ]
+ }
}
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = "postqueue -p"
+ self.command = 'postqueue -p'
self.order = ORDER
self.definitions = CHARTS
diff --git a/python.d/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
index 0522b1938..7f43877c3 100644
--- a/python.d/postgres.chart.py
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: postgres netdata python.d module
# Authors: facetoe, dangtranhoang
+# SPDX-License-Identifier: GPL-3.0-or-later
from copy import deepcopy
@@ -20,221 +21,310 @@ update_every = 1
priority = 60000
retries = 60
-METRICS = dict(
- DATABASE=['connections',
- 'xact_commit',
- 'xact_rollback',
- 'blks_read',
- 'blks_hit',
- 'tup_returned',
- 'tup_fetched',
- 'tup_inserted',
- 'tup_updated',
- 'tup_deleted',
- 'conflicts',
- 'temp_files',
- 'temp_bytes',
- 'size'],
- BACKENDS=['backends_active',
- 'backends_idle'],
- INDEX_STATS=['index_count',
- 'index_size'],
- TABLE_STATS=['table_size',
- 'table_count'],
- WAL=['written_wal',
- 'recycled_wal',
- 'total_wal'],
- WAL_WRITES=['wal_writes'],
- ARCHIVE=['ready_count',
- 'done_count',
- 'file_count'],
- BGWRITER=['checkpoint_scheduled',
- 'checkpoint_requested',
- 'buffers_checkpoint',
- 'buffers_clean',
- 'maxwritten_clean',
- 'buffers_backend',
- 'buffers_alloc',
- 'buffers_backend_fsync'],
- LOCKS=['ExclusiveLock',
- 'RowShareLock',
- 'SIReadLock',
- 'ShareUpdateExclusiveLock',
- 'AccessExclusiveLock',
- 'AccessShareLock',
- 'ShareRowExclusiveLock',
- 'ShareLock',
- 'RowExclusiveLock'],
- AUTOVACUUM=['analyze',
- 'vacuum_analyze',
- 'vacuum',
- 'vacuum_freeze',
- 'brin_summarize'],
- STANDBY_DELTA=['sent_delta',
- 'write_delta',
- 'flush_delta',
- 'replay_delta'],
- REPSLOT_FILES=['replslot_wal_keep',
- 'replslot_files']
-
-)
-
-QUERIES = dict(
- WAL="""
+METRICS = {
+ 'DATABASE': [
+ 'connections',
+ 'xact_commit',
+ 'xact_rollback',
+ 'blks_read',
+ 'blks_hit',
+ 'tup_returned',
+ 'tup_fetched',
+ 'tup_inserted',
+ 'tup_updated',
+ 'tup_deleted',
+ 'conflicts',
+ 'temp_files',
+ 'temp_bytes',
+ 'size'
+ ],
+ 'BACKENDS': [
+ 'backends_active',
+ 'backends_idle'
+ ],
+ 'INDEX_STATS': [
+ 'index_count',
+ 'index_size'
+ ],
+ 'TABLE_STATS': [
+ 'table_size',
+ 'table_count'
+ ],
+ 'WAL': [
+ 'written_wal',
+ 'recycled_wal',
+ 'total_wal'
+ ],
+ 'WAL_WRITES': [
+ 'wal_writes'
+ ],
+ 'ARCHIVE': [
+ 'ready_count',
+ 'done_count',
+ 'file_count'
+ ],
+ 'BGWRITER': [
+ 'checkpoint_scheduled',
+ 'checkpoint_requested',
+ 'buffers_checkpoint',
+ 'buffers_clean',
+ 'maxwritten_clean',
+ 'buffers_backend',
+ 'buffers_alloc',
+ 'buffers_backend_fsync'
+ ],
+ 'LOCKS': [
+ 'ExclusiveLock',
+ 'RowShareLock',
+ 'SIReadLock',
+ 'ShareUpdateExclusiveLock',
+ 'AccessExclusiveLock',
+ 'AccessShareLock',
+ 'ShareRowExclusiveLock',
+ 'ShareLock',
+ 'RowExclusiveLock'
+ ],
+ 'AUTOVACUUM': [
+ 'analyze',
+ 'vacuum_analyze',
+ 'vacuum',
+ 'vacuum_freeze',
+ 'brin_summarize'
+ ],
+ 'STANDBY_DELTA': [
+ 'sent_delta',
+ 'write_delta',
+ 'flush_delta',
+ 'replay_delta'
+ ],
+ 'REPSLOT_FILES': [
+ 'replslot_wal_keep',
+ 'replslot_files'
+ ]
+}
+
+QUERIES = {
+ 'WAL': """
SELECT
- count(*) as total_wal,
- count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
- count(*) FILTER (WHERE type = 'written') AS written_wal
+ count(*) as total_wal,
+ count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+ count(*) FILTER (WHERE type = 'written') AS written_wal
FROM
- (SELECT wal.name,
- pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ),
- CASE WHEN wal.name > pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ) THEN 'recycled'
- ELSE 'written'
- END AS type
- FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name)
- WHERE name ~ '^[0-9A-F]{{24}}$'
- ORDER BY (pg_stat_file('pg_{0}/'||name)).modification, wal.name DESC) sub;
+ (SELECT
+ wal.name,
+ pg_{0}file_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_{0}_{1}()
+ END ),
+ CASE
+ WHEN wal.name > pg_{0}file_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_{0}_{1}()
+ END ) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{{24}}$'
+ ORDER BY
+ (pg_stat_file('pg_{0}/'||name)).modification,
+ wal.name DESC) sub;
""",
- ARCHIVE="""
+ 'ARCHIVE': """
SELECT
CAST(COUNT(*) AS INT) AS file_count,
- CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count,
- CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
FROM
pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file);
""",
- BACKENDS="""
+ 'BACKENDS': """
SELECT
- count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active,
- (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle' ) AS backends_idle
-FROM pg_stat_activity;
+ count(*) - (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_active,
+ (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_idle
+FROM pg_stat_activity;
""",
- TABLE_STATS="""
+ 'TABLE_STATS': """
SELECT
- ((sum(relpages) * 8) * 1024) AS table_size,
- count(1) AS table_count
+ ((sum(relpages) * 8) * 1024) AS table_size,
+ count(1) AS table_count
FROM pg_class
WHERE relkind IN ('r', 't');
""",
- INDEX_STATS="""
+ 'INDEX_STATS': """
SELECT
- ((sum(relpages) * 8) * 1024) AS index_size,
- count(1) AS index_count
+ ((sum(relpages) * 8) * 1024) AS index_size,
+ count(1) AS index_count
FROM pg_class
-WHERE relkind = 'i';""",
- DATABASE="""
+WHERE relkind = 'i';
+""",
+ 'DATABASE': """
SELECT
- datname AS database_name,
- numbackends AS connections,
- xact_commit AS xact_commit,
- xact_rollback AS xact_rollback,
- blks_read AS blks_read,
- blks_hit AS blks_hit,
- tup_returned AS tup_returned,
- tup_fetched AS tup_fetched,
- tup_inserted AS tup_inserted,
- tup_updated AS tup_updated,
- tup_deleted AS tup_deleted,
- conflicts AS conflicts,
- pg_database_size(datname) AS size,
- temp_files AS temp_files,
- temp_bytes AS temp_bytes
+ datname AS database_name,
+ numbackends AS connections,
+ xact_commit AS xact_commit,
+ xact_rollback AS xact_rollback,
+ blks_read AS blks_read,
+ blks_hit AS blks_hit,
+ tup_returned AS tup_returned,
+ tup_fetched AS tup_fetched,
+ tup_inserted AS tup_inserted,
+ tup_updated AS tup_updated,
+ tup_deleted AS tup_deleted,
+ conflicts AS conflicts,
+ pg_database_size(datname) AS size,
+ temp_files AS temp_files,
+ temp_bytes AS temp_bytes
FROM pg_stat_database
-WHERE datname IN %(databases)s
-;
+WHERE datname IN %(databases)s ;
""",
- BGWRITER="""
+ 'BGWRITER': """
SELECT
- checkpoints_timed AS checkpoint_scheduled,
- checkpoints_req AS checkpoint_requested,
- buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint,
- buffers_clean * current_setting('block_size')::numeric buffers_clean,
- maxwritten_clean,
- buffers_backend * current_setting('block_size')::numeric buffers_backend,
- buffers_alloc * current_setting('block_size')::numeric buffers_alloc,
- buffers_backend_fsync
+ checkpoints_timed AS checkpoint_scheduled,
+ checkpoints_req AS checkpoint_requested,
+ buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint,
+ buffers_clean * current_setting('block_size')::numeric buffers_clean,
+ maxwritten_clean,
+ buffers_backend * current_setting('block_size')::numeric buffers_backend,
+ buffers_alloc * current_setting('block_size')::numeric buffers_alloc,
+ buffers_backend_fsync
FROM pg_stat_bgwriter;
""",
- LOCKS="""
+ 'LOCKS': """
SELECT
- pg_database.datname as database_name,
- mode,
- count(mode) AS locks_count
+ pg_database.datname as database_name,
+ mode,
+ count(mode) AS locks_count
FROM pg_locks
- INNER JOIN pg_database ON pg_database.oid = pg_locks.database
+INNER JOIN pg_database
+ ON pg_database.oid = pg_locks.database
GROUP BY datname, mode
ORDER BY datname, mode;
""",
- FIND_DATABASES="""
-SELECT datname
+ 'FIND_DATABASES': """
+SELECT
+ datname
FROM pg_stat_database
-WHERE has_database_privilege((SELECT current_user), datname, 'connect')
-AND NOT datname ~* '^template\d+';
+WHERE
+ has_database_privilege(
+ (SELECT current_user), datname, 'connect')
+  AND NOT datname ~* '^template\d+';
""",
- FIND_STANDBY="""
-SELECT application_name
+ 'FIND_STANDBY': """
+SELECT
+ application_name
FROM pg_stat_replication
WHERE application_name IS NOT NULL
GROUP BY application_name;
""",
- FIND_REPLICATION_SLOT="""
+ 'FIND_REPLICATION_SLOT': """
SELECT slot_name
FROM pg_replication_slots;
""",
- STANDBY_DELTA="""
-SELECT application_name,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , sent_{1}) AS sent_delta,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , write_{1}) AS write_delta,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , flush_{1}) AS flush_delta,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , replay_{1}) AS replay_delta
+ 'STANDBY_DELTA': """
+SELECT
+ application_name,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ sent_{1}) AS sent_delta,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ write_{1}) AS write_delta,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ flush_{1}) AS flush_delta,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ replay_{1}) AS replay_delta
FROM pg_stat_replication
WHERE application_name IS NOT NULL;
""",
- REPSLOT_FILES="""
+ 'REPSLOT_FILES': """
WITH wal_size AS (
- SELECT current_setting('wal_block_size')::INT * setting::INT AS val
- FROM pg_settings
- WHERE name = 'wal_segment_size'
-)
-SELECT slot_name, slot_type, replslot_wal_keep, count(slot_file) AS replslot_files
-FROM (
- SELECT slot.slot_name, CASE WHEN slot_file <> 'state' THEN 1 END AS slot_file , slot_type,
- COALESCE (floor((pg_wal_lsn_diff (pg_current_wal_lsn (),
- slot.restart_lsn) - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)),
- 0) AS replslot_wal_keep
- FROM pg_replication_slots slot
- LEFT JOIN (
- SELECT slot2.slot_name,
- pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
- FROM pg_replication_slots slot2
- ) files (slot_name, slot_file)
- ON slot.slot_name = files.slot_name
- CROSS JOIN wal_size s) AS d
-GROUP BY slot_name, slot_type, replslot_wal_keep;
+ SELECT
+ current_setting('wal_block_size')::INT * setting::INT AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn)
+ - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
""",
- IF_SUPERUSER="""
+ 'IF_SUPERUSER': """
SELECT current_setting('is_superuser') = 'on' AS is_superuser;
""",
- DETECT_SERVER_VERSION="""
+ 'DETECT_SERVER_VERSION': """
SHOW server_version_num;
""",
- AUTOVACUUM="""
+ 'AUTOVACUUM': """
SELECT
- count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
- count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
- count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%'
- AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%'
- AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum,
- count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze,
- count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize
+ count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%'
+ AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%'
+ AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum,
+ count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize
FROM pg_stat_activity
WHERE query NOT LIKE '%%pg_stat_activity%%';
""",
- DIFF_LSN="""
-SELECT pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END, '0/0') as wal_writes ;
+ 'DIFF_LSN': """
+SELECT
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ '0/0') as wal_writes ;
"""
-
-)
+}
QUERY_STATS = {
@@ -243,11 +333,34 @@ QUERY_STATS = {
QUERIES['LOCKS']: METRICS['LOCKS']
}
-ORDER = ['db_stat_temp_files', 'db_stat_temp_bytes', 'db_stat_blks', 'db_stat_tuple_returned', 'db_stat_tuple_write',
- 'db_stat_transactions','db_stat_connections', 'database_size', 'backend_process', 'index_count', 'index_size',
- 'table_count', 'table_size', 'wal', 'wal_writes', 'archive_wal', 'checkpointer', 'stat_bgwriter_alloc', 'stat_bgwriter_checkpoint',
- 'stat_bgwriter_backend', 'stat_bgwriter_backend_fsync' , 'stat_bgwriter_bgwriter', 'stat_bgwriter_maxwritten',
- 'replication_slot', 'standby_delta', 'autovacuum']
+ORDER = [
+ 'db_stat_temp_files',
+ 'db_stat_temp_bytes',
+ 'db_stat_blks',
+ 'db_stat_tuple_returned',
+ 'db_stat_tuple_write',
+ 'db_stat_transactions',
+ 'db_stat_connections',
+ 'database_size',
+ 'backend_process',
+ 'index_count',
+ 'index_size',
+ 'table_count',
+ 'table_size',
+ 'wal',
+ 'wal_writes',
+ 'archive_wal',
+ 'checkpointer',
+ 'stat_bgwriter_alloc',
+ 'stat_bgwriter_checkpoint',
+ 'stat_bgwriter_backend',
+ 'stat_bgwriter_backend_fsync',
+ 'stat_bgwriter_bgwriter',
+ 'stat_bgwriter_maxwritten',
+ 'replication_slot',
+ 'standby_delta',
+ 'autovacuum'
+]
CHARTS = {
'db_stat_transactions': {
@@ -256,26 +369,30 @@ CHARTS = {
'lines': [
['xact_commit', 'committed', 'incremental'],
['xact_rollback', 'rolled back', 'incremental']
- ]},
+ ]
+ },
'db_stat_connections': {
'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections',
'line'],
'lines': [
['connections', 'connections', 'absolute']
- ]},
+ ]
+ },
'db_stat_blks': {
'options': [None, 'Disk blocks reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_blks', 'line'],
'lines': [
['blks_read', 'disk', 'incremental'],
['blks_hit', 'cache', 'incremental']
- ]},
+ ]
+ },
'db_stat_tuple_returned': {
'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned',
'line'],
'lines': [
['tup_returned', 'sequential', 'incremental'],
['tup_fetched', 'bitmap', 'incremental']
- ]},
+ ]
+ },
'db_stat_tuple_write': {
'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'],
'lines': [
@@ -283,103 +400,128 @@ CHARTS = {
['tup_updated', 'updated', 'incremental'],
['tup_deleted', 'deleted', 'incremental'],
['conflicts', 'conflicts', 'incremental']
- ]},
+ ]
+ },
'db_stat_temp_bytes': {
- 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes', 'line'],
+ 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
+ 'line'],
'lines': [
['temp_bytes', 'size', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'db_stat_temp_files': {
- 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files', 'line'],
+ 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files',
+ 'line'],
'lines': [
['temp_files', 'files', 'incremental']
- ]},
+ ]
+ },
'database_size': {
'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'],
'lines': [
- ]},
+ ]
+ },
'backend_process': {
'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process',
'line'],
'lines': [
['backends_active', 'active', 'absolute'],
['backends_idle', 'idle', 'absolute']
- ]},
+ ]
+ },
'index_count': {
'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
'lines': [
['index_count', 'total', 'absolute']
- ]},
+ ]
+ },
'index_size': {
'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'],
'lines': [
['index_size', 'size', 'absolute', 1, 1024 * 1024]
- ]},
+ ]
+ },
'table_count': {
'options': [None, 'Total Tables', 'tables', 'tables', 'postgres.table_count', 'line'],
'lines': [
['table_count', 'total', 'absolute']
- ]},
+ ]
+ },
'table_size': {
'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'],
'lines': [
['table_size', 'size', 'absolute', 1, 1024 * 1024]
- ]},
+ ]
+ },
'wal': {
'options': [None, 'Write-Ahead Logs', 'files', 'wal', 'postgres.wal', 'line'],
'lines': [
['written_wal', 'written', 'absolute'],
['recycled_wal', 'recycled', 'absolute'],
['total_wal', 'total', 'absolute']
- ]},
+ ]
+ },
'wal_writes': {
'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'],
'lines': [
['wal_writes', 'writes', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'archive_wal': {
'options': [None, 'Archive Write-Ahead Logs', 'files/s', 'archive wal', 'postgres.archive_wal', 'line'],
'lines': [
['file_count', 'total', 'incremental'],
['ready_count', 'ready', 'incremental'],
['done_count', 'done', 'incremental']
- ]},
+ ]
+ },
'checkpointer': {
'options': [None, 'Checkpoints', 'writes', 'checkpointer', 'postgres.checkpointer', 'line'],
'lines': [
['checkpoint_scheduled', 'scheduled', 'incremental'],
['checkpoint_requested', 'requested', 'incremental']
- ]},
+ ]
+ },
'stat_bgwriter_alloc': {
'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
'lines': [
['buffers_alloc', 'alloc', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'stat_bgwriter_checkpoint': {
- 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_checkpoint', 'line'],
+ 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter',
+ 'postgres.stat_bgwriter_checkpoint', 'line'],
'lines': [
['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'stat_bgwriter_backend': {
- 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_backend', 'line'],
+ 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter',
+ 'postgres.stat_bgwriter_backend', 'line'],
'lines': [
['buffers_backend', 'backend', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'stat_bgwriter_backend_fsync': {
'options': [None, 'Fsync by backend', 'times', 'bgwriter', 'postgres.stat_bgwriter_backend_fsync', 'line'],
'lines': [
['buffers_backend_fsync', 'backend fsync', 'incremental']
- ]},
+ ]
+ },
'stat_bgwriter_bgwriter': {
- 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter', 'postgres.bgwriter_bgwriter', 'line'],
+ 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter',
+ 'postgres.bgwriter_bgwriter', 'line'],
'lines': [
['buffers_clean', 'clean', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'stat_bgwriter_maxwritten': {
- 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten', 'line'],
+ 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten',
+ 'line'],
'lines': [
['maxwritten_clean', 'maxwritten', 'incremental']
- ]},
+ ]
+ },
'autovacuum': {
'options': [None, 'Autovacuum workers', 'workers', 'autovacuum', 'postgres.autovacuum', 'line'],
'lines': [
@@ -388,7 +530,8 @@ CHARTS = {
['vacuum_analyze', 'vacuum analyze', 'absolute'],
['vacuum_freeze', 'vacuum freeze', 'absolute'],
['brin_summarize', 'brin summarize', 'absolute']
- ]},
+ ]
+ },
'standby_delta': {
'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'],
'lines': [
@@ -396,13 +539,15 @@ CHARTS = {
['write_delta', 'write delta', 'absolute', 1, 1024],
['flush_delta', 'flush delta', 'absolute', 1, 1024],
['replay_delta', 'replay delta', 'absolute', 1, 1024]
- ]},
- 'replication_slot': {
+ ]
+ },
+ 'replication_slot': {
'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'],
'lines': [
            ['replslot_wal_keep', 'wal kept', 'absolute'],
['replslot_files', 'pg_replslot files', 'absolute']
- ]}
+ ]
+ }
}
@@ -462,7 +607,7 @@ class Service(SimpleService):
cursor.close()
if self.database_poll and isinstance(self.database_poll, str):
- self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()]\
+ self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()] \
or self.databases
self.locks_zeroed = populate_lock_types(self.databases)
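The reflowed `database_poll` branch keeps its original meaning: a space-separated list of database names narrows the discovered set, and an empty intersection falls back to polling everything. A quick illustration (values hypothetical):

    self.databases = ['postgres', 'app_db', 'staging']
    self.database_poll = 'app_db staging'   # -> ['app_db', 'staging']
    self.database_poll = 'no_such_db'       # -> no match, the `or` falls back to all three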
@@ -482,8 +627,8 @@ class Service(SimpleService):
wal = 'xlog'
lsn = 'location'
self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER']
- self.queries[QUERIES['DIFF_LSN'].format(wal,lsn)] = METRICS['WAL_WRITES']
- self.queries[QUERIES['STANDBY_DELTA'].format(wal,lsn)] = METRICS['STANDBY_DELTA']
+ self.queries[QUERIES['DIFF_LSN'].format(wal, lsn)] = METRICS['WAL_WRITES']
+ self.queries[QUERIES['STANDBY_DELTA'].format(wal, lsn)] = METRICS['STANDBY_DELTA']
if self.index_stats:
self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
@@ -492,7 +637,7 @@ class Service(SimpleService):
if is_superuser:
self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE']
if self.server_version >= 90400:
- self.queries[QUERIES['WAL'].format(wal,lsn)] = METRICS['WAL']
+ self.queries[QUERIES['WAL'].format(wal, lsn)] = METRICS['WAL']
if self.server_version >= 100000:
self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES']
if self.server_version >= 90400:
@@ -501,8 +646,8 @@ class Service(SimpleService):
def create_dynamic_charts_(self):
for database_name in self.databases[::-1]:
- self.definitions['database_size']['lines'].append([database_name + '_size',
- database_name, 'absolute', 1, 1024 * 1024])
+ self.definitions['database_size']['lines'].append(
+ [database_name + '_size', database_name, 'absolute', 1, 1024 * 1024])
for chart_name in [name for name in self.order if name.startswith('db_stat')]:
add_database_stat_chart_(order=self.order, definitions=self.definitions,
name=chart_name, database_name=database_name)
@@ -510,17 +655,21 @@ class Service(SimpleService):
add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name)
for application_name in self.secondaries[::-1]:
- add_replication_delta_chart_(order=self.order, definitions=self.definitions,
- name='standby_delta', application_name=application_name)
+ add_replication_delta_chart_(
+ order=self.order,
+ definitions=self.definitions,
+ name='standby_delta',
+ application_name=application_name)
for slot_name in self.replication_slots[::-1]:
- add_replication_slot_chart_(order=self.order, definitions=self.definitions,
- name='replication_slot', slot_name=slot_name)
-
-
+ add_replication_slot_chart_(
+ order=self.order,
+ definitions=self.definitions,
+ name='replication_slot',
+ slot_name=slot_name)
def _get_data(self):
- result, error = self._connect()
+ result, _ = self._connect()
if result:
cursor = self.connection.cursor(cursor_factory=DictCursor)
try:
@@ -551,7 +700,8 @@ class Service(SimpleService):
else:
dimension_id = metric
if metric in row:
- self.data[dimension_id] = int(row[metric])
+ if row[metric] is not None:
+ self.data[dimension_id] = int(row[metric])
elif 'locks_count' in row:
self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0
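The new `is not None` guard matters because rows fetched through psycopg2's `DictCursor` can carry SQL NULLs, and `int(None)` raises `TypeError`; skipping the assignment leaves the dimension's previous value intact instead of aborting the whole collection run. The failure mode being avoided, in miniature:

    row = {'replslot_wal_keep': None}  # e.g. a slot with no restart_lsn yet
    int(row['replslot_wal_keep'])      # TypeError: int() argument must be a string or a number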
@@ -564,6 +714,7 @@ def discover_databases_(cursor, query):
result.append(db)
return result
+
def discover_secondaries_(cursor, query):
cursor.execute(query)
result = list()
@@ -572,6 +723,7 @@ def discover_secondaries_(cursor, query):
result.append(sc)
return result
+
def discover_replication_slots_(cursor, query):
cursor.execute(query)
result = list()
@@ -580,14 +732,17 @@ def discover_replication_slots_(cursor, query):
result.append(slot)
return result
+
def check_if_superuser_(cursor, query):
cursor.execute(query)
return cursor.fetchone()[0]
+
def detect_server_version(cursor, query):
cursor.execute(query)
return int(cursor.fetchone()[0])
+
def populate_lock_types(databases):
result = dict()
for database in databases:
@@ -626,11 +781,12 @@ def add_database_stat_chart_(order, definitions, name, database_name):
chart_template = CHARTS[name]
chart_name = '_'.join([database_name, name])
order.insert(0, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
+ name, title, units, _, context, chart_type = chart_template['options']
definitions[chart_name] = {
'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type],
'lines': create_lines(database_name, chart_template['lines'])}
+
def add_replication_delta_chart_(order, definitions, name, application_name):
def create_lines(standby, lines):
result = list()
@@ -648,6 +804,7 @@ def add_replication_delta_chart_(order, definitions, name, application_name):
'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type],
'lines': create_lines(application_name, chart_template['lines'])}
+
def add_replication_slot_chart_(order, definitions, name, slot_name):
def create_lines(slot, lines):
result = list()
diff --git a/python.d/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/third_party/__init__.py
+++ b/collectors/python.d.plugin/python_modules/__init__.py
diff --git a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
index a71f2bfd2..72f9ff714 100644
--- a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
@@ -2,6 +2,7 @@
# Description:
# Author: Pawel Krupa (paulfantom)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
@@ -16,15 +17,15 @@ class ExecutableService(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.command = None
- def _get_raw_data(self, stderr=False):
+ def _get_raw_data(self, stderr=False, command=None):
"""
Get raw data from executed command
:return: <list>
"""
try:
- p = Popen(self.command, stdout=PIPE, stderr=PIPE)
+ p = Popen(command if command else self.command, stdout=PIPE, stderr=PIPE)
except Exception as error:
- self.error('Executing command {command} resulted in error: {error}'.format(command=self.command,
+ self.error('Executing command {command} resulted in error: {error}'.format(command=command or self.command,
error=error))
return None
data = list()
@@ -35,7 +36,7 @@ class ExecutableService(SimpleService):
except TypeError:
continue
- return data or None
+ return data
def check(self):
"""
diff --git a/python.d/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
index 45daa2446..5acfd73f8 100644
--- a/python.d/python_modules/bases/FrameworkServices/LogService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from glob import glob
import os
diff --git a/python.d/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
index 3acc5b109..53807e2c4 100644
--- a/python.d/python_modules/bases/FrameworkServices/MySQLService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from sys import exc_info
diff --git a/python.d/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
index 177332c1f..dd53fbc14 100644
--- a/python.d/python_modules/bases/FrameworkServices/SimpleService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -2,13 +2,12 @@
# Description:
# Author: Pawel Krupa (paulfantom)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from threading import Thread
+from time import sleep
-try:
- from time import sleep, monotonic as time
-except ImportError:
- from time import sleep, time
+from third_party.monotonic import monotonic
from bases.charts import Charts, ChartError, create_runtime_chart
from bases.collection import OldVersionCompatibility, safe_print
@@ -168,7 +167,7 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
'retries: {retries}'.format(freq=job.FREQ, retries=job.RETRIES_MAX - job.RETRIES))
while True:
- job.START_RUN = time()
+ job.START_RUN = monotonic()
job.NEXT_RUN = job.START_RUN - (job.START_RUN % job.FREQ) + job.FREQ + job.PENALTY
@@ -189,7 +188,7 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
if not self.manage_retries():
return
else:
- job.ELAPSED = int((time() - job.START_RUN) * 1e3)
+ job.ELAPSED = int((monotonic() - job.START_RUN) * 1e3)
job.PREV_UPDATE = job.START_RUN
job.RETRIES, job.PENALTY = 0, 0
safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
@@ -253,7 +252,7 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
self.debug('sleeping for {sleep_time} to reach frequency of {freq} sec'.format(sleep_time=sleep_time,
freq=job.FREQ + job.PENALTY))
sleep(sleep_time)
- job.START_RUN = time()
+ job.START_RUN = monotonic()
def get_data(self):
return self._get_data()
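Replacing `time()` with `monotonic()` in the scheduler makes the run/sleep arithmetic immune to wall-clock jumps (NTP steps, manual date changes); the bundled `third_party.monotonic` backport covers interpreters older than Python 3.3, where the stdlib has no monotonic clock. The property being relied on, in brief:

    from third_party.monotonic import monotonic

    start = monotonic()
    # ... one collection cycle ...
    elapsed_ms = int((monotonic() - start) * 1e3)  # never negative, even if the
                                                   # system clock is set backwards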
diff --git a/python.d/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
index 8d27ae660..e85455307 100644
--- a/python.d/python_modules/bases/FrameworkServices/SocketService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -1,9 +1,18 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import socket
+try:
+ import ssl
+except ImportError:
+ _TLS_SUPPORT = False
+else:
+ _TLS_SUPPORT = True
+
from bases.FrameworkServices.SimpleService import SimpleService
@@ -16,6 +25,9 @@ class SocketService(SimpleService):
self.unix_socket = None
self.dgram_socket = False
self.request = ''
+ self.tls = False
+ self.cert = None
+ self.key = None
self.__socket_config = None
self.__empty_request = "".encode()
SimpleService.__init__(self, configuration=configuration, name=name)
@@ -26,7 +38,7 @@ class SocketService(SimpleService):
message=message))
else:
if self.__socket_config is not None:
- af, sock_type, proto, canon_name, sa = self.__socket_config
+ _, _, _, _, sa = self.__socket_config
self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0],
port=sa[1],
message=message))
@@ -44,7 +56,7 @@ class SocketService(SimpleService):
self.error("Cannot create socket to 'None':")
return False
- af, sock_type, proto, canon_name, sa = res
+ af, sock_type, proto, _, sa = res
try:
self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
self._sock = socket.socket(af, sock_type, proto)
@@ -56,10 +68,24 @@ class SocketService(SimpleService):
self.__socket_config = None
return False
+ if self.tls:
+ try:
+ self.debug('Encapsulating socket with TLS')
+ self._sock = ssl.wrap_socket(self._sock,
+ keyfile=self.key,
+ certfile=self.cert,
+ server_side=False,
+ cert_reqs=ssl.CERT_NONE)
+ except (socket.error, ssl.SSLError) as error:
+                self.error('Failed to wrap socket: {0}'.format(error))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
try:
self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
self._sock.connect(sa)
- except socket.error as error:
+ except (socket.error, ssl.SSLError) as error:
self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0],
port=sa[1],
error=error))
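`ssl.wrap_socket()` with `cert_reqs=ssl.CERT_NONE` encrypts the connection but performs no server-certificate verification, so this gives transport encryption only. On interpreters where `wrap_socket` is deprecated, a roughly equivalent context-based call would be (a sketch, not part of this patch):

    context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE           # matches cert_reqs=CERT_NONE above
    if self.cert:
        context.load_cert_chain(certfile=self.cert, keyfile=self.key)
    self._sock = context.wrap_socket(self._sock)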
@@ -147,7 +173,7 @@ class SocketService(SimpleService):
pass
self._sock = None
- def _send(self):
+ def _send(self, request=None):
"""
Send request.
:return: boolean
@@ -155,8 +181,8 @@ class SocketService(SimpleService):
# Send request if it is needed
if self.request != self.__empty_request:
try:
- self.debug('sending request: {0}'.format(self.request))
- self._sock.send(self.request)
+ self.debug('sending request: {0}'.format(request or self.request))
+ self._sock.send(request or self.request)
except Exception as error:
self._socket_error('error sending request: {0}'.format(error))
self._disconnect()
@@ -197,7 +223,7 @@ class SocketService(SimpleService):
self.debug('final response: {0}'.format(data))
return data
- def _get_raw_data(self, raw=False):
+ def _get_raw_data(self, raw=False, request=None):
"""
Get raw data with low-level "socket" module.
:param raw: set `True` to return bytes
@@ -211,7 +237,7 @@ class SocketService(SimpleService):
return None
# Send request if it is needed
- if not self._send():
+ if not self._send(request):
return None
data = self._receive(raw)
@@ -249,6 +275,28 @@ class SocketService(SimpleService):
except (KeyError, TypeError):
self.debug('No port specified. Using: "{0}"'.format(self.port))
+ self.tls = bool(self.configuration.get('tls', self.tls))
+ if self.tls and not _TLS_SUPPORT:
+ self.warning('TLS requested but no TLS module found, disabling TLS support.')
+ self.tls = False
+ if _TLS_SUPPORT and not self.tls:
+ self.debug('No TLS preference specified, not using TLS.')
+
+ if self.tls and _TLS_SUPPORT:
+ self.key = self.configuration.get('tls_key_file')
+ self.cert = self.configuration.get('tls_cert_file')
+ if not self.cert:
+                # If there's no valid certificate, clear the key too.
+ self.debug('No valid TLS client certificate configuration found.')
+ self.key = None
+ self.cert = None
+ elif not self.key:
+ # If a key isn't listed, the config may still be
+ # valid, because there may be a key attached to the
+ # certificate.
+ self.info('No TLS client key specified, assuming it\'s attached to the certificate.')
+ self.key = None
+
try:
self.request = str(self.configuration['request'])
except (KeyError, TypeError):
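The new keys are read straight off `self.configuration`, so a socket-based job enabling TLS with a client certificate would be configured roughly like this (dict form of a YAML job; paths and values hypothetical):

    configuration = {
        'host': '127.0.0.1',
        'port': 8443,
        'tls': True,
        'tls_cert_file': '/etc/ssl/client.pem',  # may also contain the key
        'tls_key_file': '/etc/ssl/client.key',   # optional, see the comment above
        'request': 'stats',
    }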
diff --git a/python.d/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
index bb340ba3b..856f38851 100644
--- a/python.d/python_modules/bases/FrameworkServices/UrlService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -2,6 +2,7 @@
# Description:
# Author: Pawel Krupa (paulfantom)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import urllib3
@@ -22,8 +23,13 @@ class UrlService(SimpleService):
self.proxy_user = self.configuration.get('proxy_user')
self.proxy_password = self.configuration.get('proxy_pass')
self.proxy_url = self.configuration.get('proxy_url')
+ self.method = self.configuration.get('method', 'GET')
self.header = self.configuration.get('header')
self.request_timeout = self.configuration.get('timeout', 1)
+ self.tls_verify = self.configuration.get('tls_verify')
+ self.tls_ca_file = self.configuration.get('tls_ca_file')
+ self.tls_key_file = self.configuration.get('tls_key_file')
+ self.tls_cert_file = self.configuration.get('tls_cert_file')
self._manager = None
def __make_headers(self, **header_kw):
@@ -60,9 +66,21 @@ class UrlService(SimpleService):
else:
manager = urllib3.PoolManager
params = dict(headers=header)
+ tls_cert_file = self.tls_cert_file
+ if tls_cert_file:
+ params['cert_file'] = tls_cert_file
+ # NOTE: key_file is useless without cert_file, but
+ # cert_file may include the key as well.
+ tls_key_file = self.tls_key_file
+ if tls_key_file:
+ params['key_file'] = tls_key_file
+ tls_ca_file = self.tls_ca_file
+ if tls_ca_file:
+ params['ca_certs'] = tls_ca_file
try:
url = header_kw.get('url') or self.url
- if url.startswith('https'):
+ if url.startswith('https') and not self.tls_verify and not tls_ca_file:
+ params['ca_certs'] = None
return manager(assert_hostname=False, cert_reqs='CERT_NONE', **params)
return manager(**params)
except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error:
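Verification is now disabled only when nothing indicates the user wants it: an `https` URL with neither `tls_verify` set nor a `tls_ca_file` gets `ca_certs=None` plus `cert_reqs='CERT_NONE'`, while supplying either option produces a normally verifying manager. The decision, condensed (a sketch of the logic above, not a drop-in replacement):

    wants_verification = bool(self.tls_verify or self.tls_ca_file)
    if url.startswith('https') and not wants_verification:
        manager = urllib3.PoolManager(assert_hostname=False, cert_reqs='CERT_NONE',
                                      ca_certs=None, headers=header)
    else:
        manager = urllib3.PoolManager(headers=header)  # plus any cert/key/CA params set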
@@ -77,13 +95,13 @@ class UrlService(SimpleService):
try:
status, data = self._get_raw_data_with_status(url, manager)
except (urllib3.exceptions.HTTPError, TypeError, AttributeError) as error:
- self.error('Url: {url}. Error: {error}'.format(url=url, error=error))
+ self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error))
return None
if status == 200:
- return data.decode()
+ return data
else:
- self.debug('Url: {url}. Http response status code: {code}'.format(url=url, code=status))
+ self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=status))
return None
def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True):
@@ -93,13 +111,15 @@ class UrlService(SimpleService):
"""
url = url or self.url
manager = manager or self._manager
- response = manager.request(method='GET',
+ response = manager.request(method=self.method,
url=url,
timeout=self.request_timeout,
retries=retries,
headers=manager.headers,
redirect=redirect)
- return response.status, response.data
+ if isinstance(response.data, str):
+ return response.status, response.data
+ return response.status, response.data.decode()
def check(self):
"""
diff --git a/python.d/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/urllib3/contrib/__init__.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py
+++ b/collectors/python.d.plugin/python_modules/bases/__init__.py
diff --git a/python.d/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
index 5394fbf64..2963739ec 100644
--- a/python.d/python_modules/bases/charts.py
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.collection import safe_print
-CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type']
+CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden']
DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
VARIABLE_PARAMS = ['id', 'value']
@@ -13,9 +14,9 @@ DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row',
CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} '' 'python.d.plugin' '{module_name}'\n"
+ "{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} 'obsolete'\n"
+ "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden}'\n"
@@ -151,6 +152,8 @@ class Chart:
id=self.params['id'])
if self.params.get('chart_type') not in CHART_TYPES:
self.params['chart_type'] = 'absolute'
+ hidden = str(self.params.get('hidden', ''))
+ self.params['hidden'] = 'hidden' if hidden == 'hidden' else ''
self.dimensions = list()
self.variables = set()
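With `'hidden'` added to `CHART_PARAMS` and a `'{hidden}'` slot in `CHART_CREATE`, a chart can now be created but kept off the dashboard; anything other than the literal string 'hidden' is normalised to ''. Assuming the extra options entry maps onto the new parameter the same way the existing ones do, a module-side definition would look like (chart and dimensions hypothetical):

    CHARTS = {
        'internal_counters': {
            # name, title, units, family, context, chart_type, hidden
            'options': [None, 'Internal counters', 'ops', 'debug',
                        'example.internal', 'line', 'hidden'],
            'lines': [
                ['ops_total', 'total', 'incremental'],
            ],
        },
    }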
@@ -304,6 +307,12 @@ class Dimension:
return self.id == other
return self.id == other.id
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
def create(self):
return DIMENSION_CREATE.format(**self.params)
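Python 2 does not derive `__ne__` from `__eq__`, and Python 3 sets `__hash__` to `None` on any class that overrides `__eq__` without also defining `__hash__`; since dimensions and variables are kept in sets (see `self.variables = set()` above), all three methods have to be defined together. The pitfall, in miniature:

    class Id(object):
        def __init__(self, id):
            self.id = id
        def __eq__(self, other):
            return self.id == getattr(other, 'id', other)

    a, b = Id('x'), Id('x')
    a == b   # True
    a != b   # True on Python 2, where != falls back to identity instead of negating __eq__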
@@ -360,6 +369,9 @@ class ChartVariable:
return self.id == other.id
return False
+ def __ne__(self, other):
+ return not self == other
+
def __hash__(self):
return hash(repr(self))
diff --git a/python.d/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
index e03b4f58e..479a3b610 100644
--- a/python.d/python_modules/bases/collection.py
+++ b/collectors/python.d.plugin/python_modules/bases/collection.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
diff --git a/python.d/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py
index d18b9dcd0..9eb268ce7 100644
--- a/python.d/python_modules/bases/loaders.py
+++ b/collectors/python.d.plugin/python_modules/bases/loaders.py
@@ -1,18 +1,27 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import types
+
from sys import version_info
PY_VERSION = version_info[:2]
+try:
+ if PY_VERSION > (3, 1):
+ from pyyaml3 import SafeLoader as YamlSafeLoader
+ else:
+ from pyyaml2 import SafeLoader as YamlSafeLoader
+except ImportError:
+ from yaml import SafeLoader as YamlSafeLoader
+
+
if PY_VERSION > (3, 1):
- from pyyaml3 import SafeLoader as YamlSafeLoader
from importlib.machinery import SourceFileLoader
DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
else:
- from pyyaml2 import SafeLoader as YamlSafeLoader
from imp import load_source as SourceFileLoader
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
@@ -26,6 +35,14 @@ def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
+def safe_load(stream):
+ loader = YamlSafeLoader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+
YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor)
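The module-level `safe_load()` bundles loader construction, data extraction, and `dispose()` into one call, and because `dict_constructor` is registered on `YamlSafeLoader`, every YAML mapping is returned as an `OrderedDict`, preserving the order of job and chart definitions. A usage sketch (the file path is hypothetical):

    from bases.loaders import safe_load

    with open('/etc/netdata/python.d/example.conf') as stream:
        config = safe_load(stream)  # mappings arrive as OrderedDict, in file order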
diff --git a/python.d/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
index fc40b83d3..39be77a79 100644
--- a/python.d/python_modules/bases/loggers.py
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import logging
import traceback
diff --git a/python.d/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
index 76e19e13f..4d560e438 100644
--- a/python.d/python_modules/pyyaml2/__init__.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from error import *
diff --git a/python.d/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
index 06e5ac782..6b41b8067 100644
--- a/python.d/python_modules/pyyaml2/composer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Composer', 'ComposerError']
diff --git a/python.d/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
index 635faac3e..8ad1b90a7 100644
--- a/python.d/python_modules/pyyaml2/constructor.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError']
diff --git a/python.d/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
index 68dcd7519..2858ab479 100644
--- a/python.d/python_modules/pyyaml2/cyaml.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
diff --git a/python.d/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
index f811d2c91..3685cbeeb 100644
--- a/python.d/python_modules/pyyaml2/dumper.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
diff --git a/python.d/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
index e5bcdcccb..9a460a0fd 100644
--- a/python.d/python_modules/pyyaml2/emitter.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
diff --git a/python.d/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
index 577686db5..5466be721 100644
--- a/python.d/python_modules/pyyaml2/error.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
diff --git a/python.d/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
index f79ad389c..283452add 100644
--- a/python.d/python_modules/pyyaml2/events.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Abstract classes.
diff --git a/python.d/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
index 293ff467b..1c195531f 100644
--- a/python.d/python_modules/pyyaml2/loader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
diff --git a/python.d/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
index c4f070c41..ed2a1b43e 100644
--- a/python.d/python_modules/pyyaml2/nodes.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
class Node(object):
def __init__(self, tag, value, start_mark, end_mark):
diff --git a/python.d/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
index f9e3057f3..97ba08337 100644
--- a/python.d/python_modules/pyyaml2/parser.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
diff --git a/python.d/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
index 3249e6b9f..8d422954e 100644
--- a/python.d/python_modules/pyyaml2/reader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# This module contains abstractions for the input stream. You don't have to
# look further, there is no pretty code.
#
diff --git a/python.d/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
index 5f4fc70db..0a1404eca 100644
--- a/python.d/python_modules/pyyaml2/representer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
diff --git a/python.d/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
index 6b5ab8759..49922debf 100644
--- a/python.d/python_modules/pyyaml2/resolver.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseResolver', 'Resolver']
diff --git a/python.d/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
index 5228fad65..971da6127 100644
--- a/python.d/python_modules/pyyaml2/scanner.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Scanner produces tokens of the following types:
# STREAM-START
diff --git a/python.d/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
index 0bf1e96dc..15fdbb0c0 100644
--- a/python.d/python_modules/pyyaml2/serializer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Serializer', 'SerializerError']
diff --git a/python.d/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
index 4d0b48a39..c5c4fb116 100644
--- a/python.d/python_modules/pyyaml2/tokens.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
class Token(object):
def __init__(self, start_mark, end_mark):
diff --git a/python.d/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
index a5e20f94d..a884b33cf 100644
--- a/python.d/python_modules/pyyaml3/__init__.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from .error import *
diff --git a/python.d/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
index d5c6a7acd..c418bba91 100644
--- a/python.d/python_modules/pyyaml3/composer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Composer', 'ComposerError']
diff --git a/python.d/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
index 981543aeb..ee09a7a7e 100644
--- a/python.d/python_modules/pyyaml3/constructor.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError']
diff --git a/python.d/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
index d5cb87e99..e6c16d894 100644
--- a/python.d/python_modules/pyyaml3/cyaml.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
diff --git a/python.d/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
index 0b6912877..ba590c6e6 100644
--- a/python.d/python_modules/pyyaml3/dumper.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
diff --git a/python.d/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
index 34cb145a5..d4be65a8e 100644
--- a/python.d/python_modules/pyyaml3/emitter.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
diff --git a/python.d/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
index b796b4dc5..5fec7d449 100644
--- a/python.d/python_modules/pyyaml3/error.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
diff --git a/python.d/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
index f79ad389c..283452add 100644
--- a/python.d/python_modules/pyyaml3/events.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Abstract classes.
diff --git a/python.d/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
index 08c8f01b3..7ef6cf815 100644
--- a/python.d/python_modules/pyyaml3/loader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
diff --git a/python.d/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
index c4f070c41..ed2a1b43e 100644
--- a/python.d/python_modules/pyyaml3/nodes.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
class Node(object):
def __init__(self, tag, value, start_mark, end_mark):
diff --git a/python.d/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
index 13a5995d2..bcec7f994 100644
--- a/python.d/python_modules/pyyaml3/parser.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
diff --git a/python.d/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
index f70e920f4..0a515fd64 100644
--- a/python.d/python_modules/pyyaml3/reader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# This module contains abstractions for the input stream. You don't have to
# look further, there is no pretty code.
#
diff --git a/python.d/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
index 67cd6fd25..756a18dcc 100644
--- a/python.d/python_modules/pyyaml3/representer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
diff --git a/python.d/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
index 0eece2582..50945e04d 100644
--- a/python.d/python_modules/pyyaml3/resolver.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseResolver', 'Resolver']
diff --git a/python.d/python_modules/pyyaml3/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
index 494d975ba..b55854e8b 100644
--- a/python.d/python_modules/pyyaml3/scanner.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Scanner produces tokens of the following types:
# STREAM-START
diff --git a/python.d/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
index fe911e67a..1ba2f7f9d 100644
--- a/python.d/python_modules/pyyaml3/serializer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Serializer', 'SerializerError']
diff --git a/python.d/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
index 4d0b48a39..c5c4fb116 100644
--- a/python.d/python_modules/pyyaml3/tokens.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
class Token(object):
def __init__(self, start_mark, end_mark):
diff --git a/python.d/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/urllib3/packages/backports/__init__.py
+++ b/collectors/python.d.plugin/python_modules/third_party/__init__.py
diff --git a/python.d/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
index 1d868f0e2..f10cd6209 100644
--- a/python.d/python_modules/third_party/lm_sensors.py
+++ b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: LGPL-2.1
"""
@package sensors.py
Python Bindings for libsensors3
@@ -254,4 +255,4 @@ class SubFeatureIterator:
return subfeature
def next(self): # python2 compatibility
-        return self.__next__()
\ No newline at end of file
+ return self.__next__()
diff --git a/python.d/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
index d0b97d47c..589401b8f 100644
--- a/python.d/python_modules/third_party/ordereddict.py
+++ b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
@@ -1,24 +1,6 @@
# Copyright (c) 2009 Raymond Hettinger
#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction,
-# including without limitation the rights to use, copy, modify, merge,
-# publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
+# SPDX-License-Identifier: MIT
from UserDict import DictMixin
diff --git a/python.d/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
index 26493ecb9..3add84816 100644
--- a/python.d/python_modules/urllib3/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
diff --git a/python.d/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
index 4849ddecd..c1d2fad36 100644
--- a/python.d/python_modules/urllib3/_collections.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from collections import Mapping, MutableMapping
try:
diff --git a/python.d/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py
index c0d832998..f757493c7 100644
--- a/python.d/python_modules/urllib3/connection.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/connection.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import datetime
import logging
diff --git a/python.d/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
index b4f1166a6..90e4c86a5 100644
--- a/python.d/python_modules/urllib3/connectionpool.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import errno
import logging
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
index e26b84086..bb826673f 100644
--- a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
This module uses ctypes to bind a whole bunch of functions and constants from
SecureTransport. The goal here is to provide the low-level API to
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
index 5e3494bce..0f79a1372 100644
--- a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
Low-level helpers for the SecureTransport bindings.
diff --git a/python.d/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
index 814b0222d..e74589fa8 100644
--- a/python.d/python_modules/urllib3/contrib/appengine.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
diff --git a/python.d/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
index 642e99ed2..3f8c9ebf5 100644
--- a/python.d/python_modules/urllib3/contrib/ntlmpool.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
NTLM authenticating pool, contributed by erikcederstran
diff --git a/python.d/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
index 6645dbaa9..8d373507d 100644
--- a/python.d/python_modules/urllib3/contrib/pyopenssl.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
diff --git a/python.d/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
index 72b23ab1c..fcc30118c 100644
--- a/python.d/python_modules/urllib3/contrib/securetransport.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
SecureTranport support for urllib3 via ctypes.
diff --git a/python.d/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
index 39e92fde1..1cb79285b 100644
--- a/python.d/python_modules/urllib3/contrib/socks.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
diff --git a/python.d/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
index 6c4be5810..a71cabe06 100644
--- a/python.d/python_modules/urllib3/exceptions.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from .packages.six.moves.http_client import (
IncompleteRead as httplib_IncompleteRead
diff --git a/python.d/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py
index 19b0ae0c8..de7577b74 100644
--- a/python.d/python_modules/urllib3/fields.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/fields.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import email.utils
import mimetypes
diff --git a/python.d/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
index cd11cee46..3febc9cfe 100644
--- a/python.d/python_modules/urllib3/filepost.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import codecs
diff --git a/python.d/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
index 170e974c1..170e974c1 100644
--- a/python.d/python_modules/urllib3/packages/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
diff --git a/python.d/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
index 75b80dcf8..8ab122f8b 100644
--- a/python.d/python_modules/urllib3/packages/backports/makefile.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
"""
backports.makefile
~~~~~~~~~~~~~~~~~~
diff --git a/python.d/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
index 4479363cc..9f7c0e6b8 100644
--- a/python.d/python_modules/urllib3/packages/ordered_dict.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
@@ -2,6 +2,7 @@
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
+# SPDX-License-Identifier: MIT
try:
from thread import get_ident as _get_ident
except ImportError:
diff --git a/python.d/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
index 190c0239c..31df5012b 100644
--- a/python.d/python_modules/urllib3/packages/six.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
@@ -2,23 +2,7 @@
# Copyright (c) 2010-2015 Benjamin Peterson
#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
index d6594eb26..2aeeeff91 100644
--- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
import sys
try:
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
index 1fd42f38a..647e081da 100644
--- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
@@ -1,7 +1,6 @@
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
-# Note: This file is under the PSF license as the code comes from the python
-# stdlib. http://docs.python.org/3/license.html
+# SPDX-License-Identifier: Python-2.0
import re
import sys
diff --git a/python.d/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
index 4ae91744d..adea9bc01 100644
--- a/python.d/python_modules/urllib3/poolmanager.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import collections
import functools
diff --git a/python.d/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py
index c0fddff04..f78331975 100644
--- a/python.d/python_modules/urllib3/request.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/request.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from .filepost import encode_multipart_formdata
diff --git a/python.d/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py
index 408d9996a..cf14a3076 100644
--- a/python.d/python_modules/urllib3/response.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/response.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from contextlib import contextmanager
import zlib
diff --git a/python.d/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
index 2f2770b62..bba628d98 100644
--- a/python.d/python_modules/urllib3/util/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
diff --git a/python.d/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
index bf699cfd0..3bd69e8fa 100644
--- a/python.d/python_modules/urllib3/util/connection.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import socket
from .wait import wait_for_read
diff --git a/python.d/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
index 3ddfcd559..18f27b032 100644
--- a/python.d/python_modules/urllib3/util/request.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from base64 import b64encode
diff --git a/python.d/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
index 67cf730ab..e4cda93d4 100644
--- a/python.d/python_modules/urllib3/util/response.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from ..packages.six.moves import http_client as httplib
diff --git a/python.d/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
index c603cb490..61e63afec 100644
--- a/python.d/python_modules/urllib3/util/retry.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import time
import logging
diff --git a/python.d/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
index d75cb266b..c0997b1a2 100644
--- a/python.d/python_modules/urllib3/util/selectors.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
diff --git a/python.d/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
index 33d428ed8..ece3ec39e 100644
--- a/python.d/python_modules/urllib3/util/ssl_.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import errno
import warnings
diff --git a/python.d/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
index cec817e6e..4041cf9b9 100644
--- a/python.d/python_modules/urllib3/util/timeout.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
diff --git a/python.d/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
index 6b6f9968d..99fd6534a 100644
--- a/python.d/python_modules/urllib3/util/url.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from collections import namedtuple
diff --git a/python.d/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
index cb396e508..21e72979c 100644
--- a/python.d/python_modules/urllib3/util/wait.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from .selectors import (
HAS_SELECT,
DefaultSelector,
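
Note: the hunks above replace per-file license boilerplate with single
machine-readable SPDX tags. A hypothetical helper (not part of this change)
that checks a tree for Python files still missing a tag:

    import os

    def missing_spdx(root):
        # yield .py files whose first five lines carry no SPDX tag
        for dirpath, _, files in os.walk(root):
            for name in files:
                if not name.endswith('.py'):
                    continue
                path = os.path.join(dirpath, name)
                with open(path) as handle:
                    head = [handle.readline() for _ in range(5)]
                if not any('SPDX-License-Identifier:' in line for line in head):
                    yield path
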
diff --git a/python.d/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
index b8847e9f8..8298b4032 100644
--- a/python.d/rabbitmq.chart.py
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: rabbitmq netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from collections import namedtuple
from json import loads
@@ -20,91 +21,104 @@ retries = 60
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
-NODE_STATS = ['fd_used',
- 'mem_used',
- 'sockets_used',
- 'proc_used',
- 'disk_free',
- 'run_queue'
- ]
-OVERVIEW_STATS = ['object_totals.channels',
- 'object_totals.consumers',
- 'object_totals.connections',
- 'object_totals.queues',
- 'object_totals.exchanges',
- 'queue_totals.messages_ready',
- 'queue_totals.messages_unacknowledged',
- 'message_stats.ack',
- 'message_stats.redeliver',
- 'message_stats.deliver',
- 'message_stats.publish'
- ]
-ORDER = ['queued_messages', 'message_rates', 'global_counts',
- 'file_descriptors', 'socket_descriptors', 'erlang_processes', 'erlang_run_queue', 'memory', 'disk_space']
+NODE_STATS = [
+ 'fd_used',
+ 'mem_used',
+ 'sockets_used',
+ 'proc_used',
+ 'disk_free',
+ 'run_queue'
+]
+
+OVERVIEW_STATS = [
+ 'object_totals.channels',
+ 'object_totals.consumers',
+ 'object_totals.connections',
+ 'object_totals.queues',
+ 'object_totals.exchanges',
+ 'queue_totals.messages_ready',
+ 'queue_totals.messages_unacknowledged',
+ 'message_stats.ack',
+ 'message_stats.redeliver',
+ 'message_stats.deliver',
+ 'message_stats.publish'
+]
+
+ORDER = [
+ 'queued_messages',
+ 'message_rates',
+ 'global_counts',
+ 'file_descriptors',
+ 'socket_descriptors',
+ 'erlang_processes',
+ 'erlang_run_queue',
+ 'memory',
+ 'disk_space'
+]
CHARTS = {
'file_descriptors': {
- 'options': [None, 'File Descriptors', 'descriptors', 'overview',
- 'rabbitmq.file_descriptors', 'line'],
+ 'options': [None, 'File Descriptors', 'descriptors', 'overview', 'rabbitmq.file_descriptors', 'line'],
'lines': [
['fd_used', 'used', 'absolute']
- ]},
+ ]
+ },
'memory': {
- 'options': [None, 'Memory', 'MB', 'overview',
- 'rabbitmq.memory', 'line'],
+ 'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'line'],
'lines': [
['mem_used', 'used', 'absolute', 1, 1024 << 10]
- ]},
+ ]
+ },
'disk_space': {
- 'options': [None, 'Disk Space', 'GB', 'overview',
- 'rabbitmq.disk_space', 'line'],
+ 'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'line'],
'lines': [
['disk_free', 'free', 'absolute', 1, 1024 ** 3]
- ]},
+ ]
+ },
'socket_descriptors': {
- 'options': [None, 'Socket Descriptors', 'descriptors', 'overview',
- 'rabbitmq.sockets', 'line'],
+ 'options': [None, 'Socket Descriptors', 'descriptors', 'overview', 'rabbitmq.sockets', 'line'],
'lines': [
['sockets_used', 'used', 'absolute']
- ]},
+ ]
+ },
'erlang_processes': {
- 'options': [None, 'Erlang Processes', 'processes', 'overview',
- 'rabbitmq.processes', 'line'],
+ 'options': [None, 'Erlang Processes', 'processes', 'overview', 'rabbitmq.processes', 'line'],
'lines': [
['proc_used', 'used', 'absolute']
- ]},
+ ]
+ },
'erlang_run_queue': {
- 'options': [None, 'Erlang Run Queue', 'processes', 'overview',
- 'rabbitmq.erlang_run_queue', 'line'],
+ 'options': [None, 'Erlang Run Queue', 'processes', 'overview', 'rabbitmq.erlang_run_queue', 'line'],
'lines': [
- ['run_queue',' length', 'absolute']
- ]},
+ ['run_queue', 'length', 'absolute']
+ ]
+ },
'global_counts': {
- 'options': [None, 'Global Counts', 'counts', 'overview',
- 'rabbitmq.global_counts', 'line'],
+ 'options': [None, 'Global Counts', 'counts', 'overview', 'rabbitmq.global_counts', 'line'],
'lines': [
['object_totals_channels', 'channels', 'absolute'],
['object_totals_consumers', 'consumers', 'absolute'],
['object_totals_connections', 'connections', 'absolute'],
['object_totals_queues', 'queues', 'absolute'],
['object_totals_exchanges', 'exchanges', 'absolute']
- ]},
+ ]
+ },
'queued_messages': {
- 'options': [None, 'Queued Messages', 'messages', 'overview',
- 'rabbitmq.queued_messages', 'stacked'],
+ 'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'],
'lines': [
['queue_totals_messages_ready', 'ready', 'absolute'],
['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute']
- ]},
+ ]
+ },
'message_rates': {
- 'options': [None, 'Message Rates', 'messages/s', 'overview',
- 'rabbitmq.message_rates', 'stacked'],
+ 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'stacked'],
'lines': [
['message_stats_ack', 'ack', 'incremental'],
['message_stats_redeliver', 'redeliver', 'incremental'],
['message_stats_deliver', 'deliver', 'incremental'],
['message_stats_publish', 'publish', 'incremental']
- ]}
+ ]
+ }
}
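
Note: besides reflowing NODE_STATS, OVERVIEW_STATS, ORDER and CHARTS, the
rabbitmq hunk fixes a misplaced quote (['run_queue',' length', ...] had
shifted a space into the dimension label). For reference, each python.d
'lines' entry has the shape [dimension_id, label, algorithm, multiplier,
divisor]; the memory chart's divisor converts collected bytes to MB:

    MEMORY_CHART = {
        'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'line'],
        'lines': [
            # divide the collected byte count by 1 MiB to plot MB
            ['mem_used', 'used', 'absolute', 1, 1024 << 10]
        ]
    }

    assert 1024 << 10 == 1024 ** 2 == 1048576
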
diff --git a/python.d/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
index 8c0330ec6..1d8e35050 100644
--- a/python.d/retroshare.chart.py
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: RetroShare netdata python.d module
# Authors: sehraf
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
@@ -20,19 +21,22 @@ CHARTS = {
'lines': [
['bandwidth_up_kb', 'Upload'],
['bandwidth_down_kb', 'Download']
- ]},
+ ]
+ },
'peers': {
'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
'lines': [
['peers_all', 'All friends'],
['peers_connected', 'Connected friends']
- ]},
+ ]
+ },
'dht': {
'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
'lines': [
['dht_size_all', 'DHT nodes estimated'],
['dht_size_rs', 'RS nodes estimated']
- ]}
+ ]
+ }
}
diff --git a/python.d/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
index 06e420b68..69d2bfe99 100644
--- a/python.d/sensors.chart.py
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: sensors netdata python.d plugin
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.SimpleService import SimpleService
from third_party import lm_sensors as sensors
@@ -16,37 +17,44 @@ CHARTS = {
'options': [None, ' temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'voltage': {
'options': [None, ' voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'current': {
'options': [None, ' current', 'Ampere', 'current', 'sensors.current', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'power': {
'options': [None, ' power', 'Watt', 'power', 'sensors.power', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000000]
- ]},
+ ]
+ },
'fan': {
'options': [None, ' fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'energy': {
'options': [None, ' energy', 'Joule', 'energy', 'sensors.energy', 'areastack'],
'lines': [
[None, None, 'incremental', 1, 1000000]
- ]},
+ ]
+ },
'humidity': {
'options': [None, ' humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]}
+ ]
+ }
}
LIMITS = {
@@ -94,7 +102,7 @@ class Service(SimpleService):
limit = LIMITS[type_name]
if val < limit[0] or val > limit[1]:
continue
- data[prefix + "_" + str(feature.name.decode())] = int(val * 1000)
+ data[prefix + '_' + str(feature.name.decode())] = int(val * 1000)
except Exception as error:
self.error(error)
return None
@@ -114,7 +122,7 @@ class Service(SimpleService):
continue
if TYPE_MAP[feature.type] == sensor:
# create chart
- name = chip_name + "_" + TYPE_MAP[feature.type]
+ name = chip_name + '_' + TYPE_MAP[feature.type]
if name not in self.order:
self.order.append(name)
chart_def = list(CHARTS[sensor]['options'])
@@ -122,7 +130,7 @@ class Service(SimpleService):
self.definitions[name] = {'options': chart_def}
self.definitions[name]['lines'] = []
line = list(CHARTS[sensor]['lines'][0])
- line[0] = chip_name + "_" + str(feature.name.decode())
+ line[0] = chip_name + '_' + str(feature.name.decode())
line[1] = sensors.get_label(chip, feature)
self.definitions[name]['lines'].append(line)
@@ -136,4 +144,3 @@ class Service(SimpleService):
self.create_definitions()
return True
-
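
Note: the sensors module builds its charts at runtime from the CHARTS
templates above; the leading space in titles such as ' temperature' leaves
room for the chip name, which is presumably prefixed when the chart is
created. A rough sketch of that construction, with simplified names:

    def build_chart(definitions, order, chip_name, sensor, label, charts):
        name = chip_name + '_' + sensor            # e.g. 'coretemp_temperature'
        if name not in order:
            order.append(name)
            options = list(charts[sensor]['options'])
            options[1] = chip_name + options[1]    # prefix title with the chip
            definitions[name] = {'options': options, 'lines': []}
        line = list(charts[sensor]['lines'][0])
        line[0] = chip_name + '_' + label          # dimension id
        line[1] = label                            # dimension label
        definitions[name]['lines'].append(line)
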
diff --git a/python.d/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
index 07ad88cd4..21dbccecc 100644
--- a/python.d/smartd_log.chart.py
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: smart netdata python.d module
# Author: l2isbad, vorph1
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
import re
@@ -115,7 +116,7 @@ def chart_template(chart_name):
chart_name: {
'options': [None, title, units, family, 'smartd_log.' + chart_name, 'line'],
'lines': []
- }
+ }
}
@@ -184,6 +185,12 @@ class Disk:
return self.name == other.name
return self.name == other
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
@handle_os_error
def is_active(self):
return (time() - os.path.getmtime(self.log_file.path)) / 60 < self.age
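
Note: the smartd_log hunk adds __ne__ and __hash__ next to the existing
__eq__ for a reason: Python 2 does not derive __ne__ from __eq__, and
Python 3 sets __hash__ to None on any class that defines __eq__, making its
instances unusable in sets and as dict keys. Minimal sketch (the module
hashes repr(self); hashing the name, as below, is the variant under which
equal disks also collapse inside a set):

    class Disk(object):
        def __init__(self, name):
            self.name = name

        def __eq__(self, other):
            if isinstance(other, Disk):
                return self.name == other.name
            return self.name == other

        def __ne__(self, other):
            return not self == other

        def __hash__(self):
            return hash(self.name)

    assert Disk('sda') == 'sda'
    assert len({Disk('sda'), Disk('sda')}) == 1
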
diff --git a/python.d/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
index 60ad0cccb..7df37e1d0 100644
--- a/python.d/springboot.chart.py
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: tomcat netdata python.d module
# Author: Wing924
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
from bases.FrameworkServices.UrlService import UrlService
@@ -17,54 +18,61 @@ DEFAULT_CHARTS = {
'response_code': {
'options': [None, "Response Codes", "requests/s", "response", "springboot.response_code", "stacked"],
'lines': [
- ["resp_other", 'Other', 'incremental'],
- ["resp_1xx", '1xx', 'incremental'],
- ["resp_2xx", '2xx', 'incremental'],
- ["resp_3xx", '3xx', 'incremental'],
- ["resp_4xx", '4xx', 'incremental'],
- ["resp_5xx", '5xx', 'incremental'],
- ]},
+ ["resp_other", 'Other', 'incremental'],
+ ["resp_1xx", '1xx', 'incremental'],
+ ["resp_2xx", '2xx', 'incremental'],
+ ["resp_3xx", '3xx', 'incremental'],
+ ["resp_4xx", '4xx', 'incremental'],
+ ["resp_5xx", '5xx', 'incremental'],
+ ]
+ },
'threads': {
'options': [None, "Threads", "current threads", "threads", "springboot.threads", "area"],
'lines': [
["threads_daemon", 'daemon', 'absolute'],
["threads", 'total', 'absolute'],
- ]},
+ ]
+ },
'gc_time': {
'options': [None, "GC Time", "milliseconds", "garbage collection", "springboot.gc_time", "stacked"],
'lines': [
- ["gc_copy_time", 'Copy', 'incremental'],
- ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
- ["gc_parnew_time", 'ParNew', 'incremental'],
- ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
- ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
- ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
- ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
- ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
- ]},
+ ["gc_copy_time", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_time", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
+ ]
+ },
'gc_ope': {
'options': [None, "GC Operations", "operations/s", "garbage collection", "springboot.gc_ope", "stacked"],
'lines': [
- ["gc_copy_count", 'Copy', 'incremental'],
- ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
- ["gc_parnew_count", 'ParNew', 'incremental'],
- ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
- ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
- ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
- ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
- ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
- ]},
+ ["gc_copy_count", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_count", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
+ ]
+ },
'heap': {
'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"],
'lines': [
["heap_committed", 'committed', "absolute"],
["heap_used", 'used', "absolute"],
- ]},
+ ]
+ }
}
+
class ExtraChartError(ValueError):
pass
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
@@ -87,11 +95,11 @@ class Service(UrlService):
return None
result = {
- 'resp_1xx': 0,
- 'resp_2xx': 0,
- 'resp_3xx': 0,
- 'resp_4xx': 0,
- 'resp_5xx': 0,
+ 'resp_1xx': 0,
+ 'resp_2xx': 0,
+ 'resp_3xx': 0,
+ 'resp_4xx': 0,
+ 'resp_5xx': 0,
'resp_other': 0,
}
@@ -121,14 +129,14 @@ class Service(UrlService):
self.order.append(extra['id'])
def _add_extra_chart(self, chart):
- chart_id = chart.get('id', None) or die('id is not defined in extra chart')
- options = chart.get('options', None) or die('option is not defined in extra chart: %s' % chart_id)
- lines = chart.get('lines', None) or die('lines is not defined in extra chart: %s' % chart_id)
-
- title = options.get('title', None) or die('title is missing: %s' % chart_id)
- units = options.get('units', None) or die('units is missing: %s' % chart_id)
- family = options.get('family', title)
- context = options.get('context', 'springboot.' + title)
+ chart_id = chart.get('id', None) or self.die('id is not defined in extra chart')
+ options = chart.get('options', None) or self.die('option is not defined in extra chart: %s' % chart_id)
+ lines = chart.get('lines', None) or self.die('lines is not defined in extra chart: %s' % chart_id)
+
+ title = options.get('title', None) or self.die('title is missing: %s' % chart_id)
+ units = options.get('units', None) or self.die('units is missing: %s' % chart_id)
+ family = options.get('family', title)
+ context = options.get('context', 'springboot.' + title)
charttype = options.get('charttype', 'line')
result = {
@@ -137,11 +145,11 @@ class Service(UrlService):
}
for line in lines:
- dimension = line.get('dimension', None) or die('dimension is missing: %s' % chart_id)
- name = line.get('name', dimension)
- algorithm = line.get('algorithm', 'absolute')
+ dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
+ name = line.get('name', dimension)
+ algorithm = line.get('algorithm', 'absolute')
multiplier = line.get('multiplier', 1)
- divisor = line.get('divisor', 1)
+ divisor = line.get('divisor', 1)
result['lines'].append([dimension, name, algorithm, multiplier, divisor])
self.definitions[chart_id] = result
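
Note: the springboot fix matters beyond style: the old code called a bare
die(...), which does not exist at module level and would have raised
NameError the first time an extra chart was malformed; self.die(...) routes
the failure through the service instead. A sketch of the validation idiom,
assuming die() simply raises the module's ExtraChartError:

    class ExtraChartError(ValueError):
        pass

    def die(message):
        raise ExtraChartError(message)

    def parse_extra_chart(chart):
        chart_id = chart.get('id') or die('id is not defined in extra chart')
        options = chart.get('options') or die('option is not defined in extra chart: %s' % chart_id)
        return chart_id, options

One caveat of the get(...) or die(...) idiom: it also rejects values that
are present but falsy (0, '', []), which is acceptable here since ids,
options and lines are expected to be non-empty.
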
diff --git a/python.d/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
index ba8f982ff..fd54168f0 100644
--- a/python.d/squid.chart.py
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: squid netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.SocketService import SocketService
@@ -15,31 +16,35 @@ ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests']
CHARTS = {
'clients_net': {
- 'options': [None, "Squid Client Bandwidth", "kilobits/s", "clients", "squid.clients_net", "area"],
+ 'options': [None, 'Squid Client Bandwidth', 'kilobits/s', 'clients', 'squid.clients_net', 'area'],
'lines': [
- ["client_http_kbytes_in", "in", "incremental", 8, 1],
- ["client_http_kbytes_out", "out", "incremental", -8, 1],
- ["client_http_hit_kbytes_out", "hits", "incremental", -8, 1]
- ]},
+ ['client_http_kbytes_in', 'in', 'incremental', 8, 1],
+ ['client_http_kbytes_out', 'out', 'incremental', -8, 1],
+ ['client_http_hit_kbytes_out', 'hits', 'incremental', -8, 1]
+ ]
+ },
'clients_requests': {
- 'options': [None, "Squid Client Requests", "requests/s", "clients", "squid.clients_requests", 'line'],
+ 'options': [None, 'Squid Client Requests', 'requests/s', 'clients', 'squid.clients_requests', 'line'],
'lines': [
- ["client_http_requests", "requests", "incremental"],
- ["client_http_hits", "hits", "incremental"],
- ["client_http_errors", "errors", "incremental", -1, 1]
- ]},
+ ['client_http_requests', 'requests', 'incremental'],
+ ['client_http_hits', 'hits', 'incremental'],
+ ['client_http_errors', 'errors', 'incremental', -1, 1]
+ ]
+ },
'servers_net': {
- 'options': [None, "Squid Server Bandwidth", "kilobits/s", "servers", "squid.servers_net", "area"],
+ 'options': [None, 'Squid Server Bandwidth', 'kilobits/s', 'servers', 'squid.servers_net', 'area'],
'lines': [
- ["server_all_kbytes_in", "in", "incremental", 8, 1],
- ["server_all_kbytes_out", "out", "incremental", -8, 1]
- ]},
+ ['server_all_kbytes_in', 'in', 'incremental', 8, 1],
+ ['server_all_kbytes_out', 'out', 'incremental', -8, 1]
+ ]
+ },
'servers_requests': {
- 'options': [None, "Squid Server Requests", "requests/s", "servers", "squid.servers_requests", 'line'],
+ 'options': [None, 'Squid Server Requests', 'requests/s', 'servers', 'squid.servers_requests', 'line'],
'lines': [
- ["server_all_requests", "requests", "incremental"],
- ["server_all_errors", "errors", "incremental", -1, 1]
- ]}
+ ['server_all_requests', 'requests', 'incremental'],
+ ['server_all_errors', 'errors', 'incremental', -1, 1]
+ ]
+ }
}
@@ -47,8 +52,8 @@ class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
self._keep_alive = True
- self.request = ""
- self.host = "localhost"
+ self.request = ''
+ self.host = 'localhost'
self.port = 3128
self.order = ORDER
self.definitions = CHARTS
@@ -62,43 +67,43 @@ class Service(SocketService):
data = dict()
try:
- raw = ""
+ raw = ''
for tmp in response.split('\r\n'):
- if tmp.startswith("sample_time"):
+ if tmp.startswith('sample_time'):
raw = tmp
break
if raw.startswith('<'):
- self.error("invalid data received")
+ self.error('invalid data received')
return None
for row in raw.split('\n'):
- if row.startswith(("client", "server.all")):
- tmp = row.split("=")
+ if row.startswith(('client', 'server.all')):
+ tmp = row.split('=')
data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
except (ValueError, AttributeError, TypeError):
- self.error("invalid data received")
+ self.error('invalid data received')
return None
if not data:
- self.error("no data received")
+ self.error('no data received')
return None
return data
def _check_raw_data(self, data):
header = data[:1024].lower()
- if "connection: keep-alive" in header:
+ if 'connection: keep-alive' in header:
self._keep_alive = True
else:
self._keep_alive = False
- if data[-7:] == "\r\n0\r\n\r\n" and "transfer-encoding: chunked" in header: # HTTP/1.1 response
- self.debug("received full response from squid")
+ if data[-7:] == '\r\n0\r\n\r\n' and 'transfer-encoding: chunked' in header: # HTTP/1.1 response
+ self.debug('received full response from squid')
return True
- self.debug("waiting more data from squid")
+ self.debug('waiting more data from squid')
return False
def check(self):
@@ -109,10 +114,10 @@ class Service(SocketService):
self._parse_config()
# format request
req = self.request.decode()
- if not req.startswith("GET"):
- req = "GET " + req
- if not req.endswith(" HTTP/1.1\r\n\r\n"):
- req += " HTTP/1.1\r\n\r\n"
+ if not req.startswith('GET'):
+ req = 'GET ' + req
+ if not req.endswith(' HTTP/1.1\r\n\r\n'):
+ req += ' HTTP/1.1\r\n\r\n'
self.request = req.encode()
if self._get_data() is not None:
return True
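
Note: _check_raw_data above decides whether the squid response is complete:
with 'transfer-encoding: chunked' the body is finished once the zero-length
final chunk arrives. Condensed sketch of the same test:

    def response_complete(data):
        header = data[:1024].lower()
        chunked = 'transfer-encoding: chunked' in header
        # '\r\n0\r\n\r\n' is the terminating zero-size chunk of HTTP/1.1
        return chunked and data.endswith('\r\n0\r\n\r\n')
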
diff --git a/python.d/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
index a570d5643..3c2d0ed40 100644
--- a/python.d/tomcat.chart.py
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description: tomcat netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# Author: Wei He (Wing924)
+# SPDX-License-Identifier: GPL-3.0-or-later
import xml.etree.ElementTree as ET
@@ -16,67 +18,75 @@ ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden
CHARTS = {
'accesses': {
- 'options': [None, "Requests", "requests/s", "statistics", "tomcat.accesses", "area"],
+ 'options': [None, 'Requests', 'requests/s', 'statistics', 'tomcat.accesses', 'area'],
'lines': [
- ["requestCount", 'accesses', 'incremental'],
- ["errorCount", 'errors', 'incremental'],
- ]},
+ ['requestCount', 'accesses', 'incremental'],
+ ['errorCount', 'errors', 'incremental'],
+ ]
+ },
'bandwidth': {
- 'options': [None, "Bandwidth", "KB/s", "statistics", "tomcat.bandwidth", "area"],
+ 'options': [None, 'Bandwidth', 'KB/s', 'statistics', 'tomcat.bandwidth', 'area'],
'lines': [
- ["bytesSent", 'sent', 'incremental', 1, 1024],
- ["bytesReceived", 'received', 'incremental', 1, 1024],
- ]},
+ ['bytesSent', 'sent', 'incremental', 1, 1024],
+ ['bytesReceived', 'received', 'incremental', 1, 1024],
+ ]
+ },
'processing_time': {
- 'options': [None, "processing time", "seconds", "statistics", "tomcat.processing_time", "area"],
+ 'options': [None, 'processing time', 'seconds', 'statistics', 'tomcat.processing_time', 'area'],
'lines': [
- ["processingTime", 'processing time', 'incremental', 1, 1000]
- ]},
+ ['processingTime', 'processing time', 'incremental', 1, 1000]
+ ]
+ },
'threads': {
- 'options': [None, "Threads", "current threads", "statistics", "tomcat.threads", "area"],
+ 'options': [None, 'Threads', 'current threads', 'statistics', 'tomcat.threads', 'area'],
'lines': [
- ["currentThreadCount", 'current', "absolute"],
- ["currentThreadsBusy", 'busy', "absolute"]
- ]},
+ ['currentThreadCount', 'current', 'absolute'],
+ ['currentThreadsBusy', 'busy', 'absolute']
+ ]
+ },
'jvm': {
- 'options': [None, "JVM Memory Pool Usage", "MB", "memory", "tomcat.jvm", "stacked"],
+ 'options': [None, 'JVM Memory Pool Usage', 'MB', 'memory', 'tomcat.jvm', 'stacked'],
'lines': [
- ["free", 'free', "absolute", 1, 1048576],
- ["eden_used", 'eden', "absolute", 1, 1048576],
- ["survivor_used", 'survivor', "absolute", 1, 1048576],
- ["tenured_used", 'tenured', "absolute", 1, 1048576],
- ["code_cache_used", 'code cache', "absolute", 1, 1048576],
- ["compressed_used", 'compressed', "absolute", 1, 1048576],
- ["metaspace_used", 'metaspace', "absolute", 1, 1048576],
- ]},
+ ['free', 'free', 'absolute', 1, 1048576],
+ ['eden_used', 'eden', 'absolute', 1, 1048576],
+ ['survivor_used', 'survivor', 'absolute', 1, 1048576],
+ ['tenured_used', 'tenured', 'absolute', 1, 1048576],
+ ['code_cache_used', 'code cache', 'absolute', 1, 1048576],
+ ['compressed_used', 'compressed', 'absolute', 1, 1048576],
+ ['metaspace_used', 'metaspace', 'absolute', 1, 1048576],
+ ]
+ },
'jvm_eden': {
- 'options': [None, "Eden Memory Usage", "MB", "memory", "tomcat.jvm_eden", "area"],
+ 'options': [None, 'Eden Memory Usage', 'MB', 'memory', 'tomcat.jvm_eden', 'area'],
'lines': [
- ["eden_used", 'used', "absolute", 1, 1048576],
- ["eden_commited", 'commited', "absolute", 1, 1048576],
- ["eden_max", 'max', "absolute", 1, 1048576]
- ]},
+ ['eden_used', 'used', 'absolute', 1, 1048576],
+ ['eden_committed', 'committed', 'absolute', 1, 1048576],
+ ['eden_max', 'max', 'absolute', 1, 1048576]
+ ]
+ },
'jvm_survivor': {
- 'options': [None, "Survivor Memory Usage", "MB", "memory", "tomcat.jvm_survivor", "area"],
+ 'options': [None, 'Survivor Memory Usage', 'MB', 'memory', 'tomcat.jvm_survivor', 'area'],
'lines': [
- ["survivor_used", 'used', "absolute", 1, 1048576],
- ["survivor_commited", 'commited', "absolute", 1, 1048576],
- ["survivor_max", 'max', "absolute", 1, 1048576]
- ]},
+ ['survivor_used', 'used', 'absolute', 1, 1048576],
+ ['survivor_committed', 'committed', 'absolute', 1, 1048576],
+ ['survivor_max', 'max', 'absolute', 1, 1048576]
+ ]
+ },
'jvm_tenured': {
- 'options': [None, "Tenured Memory Usage", "MB", "memory", "tomcat.jvm_tenured", "area"],
+ 'options': [None, 'Tenured Memory Usage', 'MB', 'memory', 'tomcat.jvm_tenured', 'area'],
'lines': [
- ["tenured_used", 'used', "absolute", 1, 1048576],
- ["tenured_commited", 'commited', "absolute", 1, 1048576],
- ["tenured_max", 'max', "absolute", 1, 1048576]
- ]},
+ ['tenured_used', 'used', 'absolute', 1, 1048576],
+ ['tenured_committed', 'committed', 'absolute', 1, 1048576],
+ ['tenured_max', 'max', 'absolute', 1, 1048576]
+ ]
+ }
}
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', "http://127.0.0.1:8080/manager/status?XML=true")
+ self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
self.connector_name = self.configuration.get('connector_name', None)
self.order = ORDER
self.definitions = CHARTS
@@ -115,27 +125,27 @@ class Service(UrlService):
name = pool.get('name')
if 'Eden Space' in name:
data['eden_used'] = pool.get('usageUsed')
- data['eden_commited'] = pool.get('usageCommitted')
+ data['eden_committed'] = pool.get('usageCommitted')
data['eden_max'] = pool.get('usageMax')
elif 'Survivor Space' in name:
data['survivor_used'] = pool.get('usageUsed')
- data['survivor_commited'] = pool.get('usageCommitted')
+ data['survivor_committed'] = pool.get('usageCommitted')
data['survivor_max'] = pool.get('usageMax')
elif 'Tenured Gen' in name or 'Old Gen' in name:
data['tenured_used'] = pool.get('usageUsed')
- data['tenured_commited'] = pool.get('usageCommitted')
+ data['tenured_committed'] = pool.get('usageCommitted')
data['tenured_max'] = pool.get('usageMax')
elif name == 'Code Cache':
data['code_cache_used'] = pool.get('usageUsed')
- data['code_cache_commited'] = pool.get('usageCommitted')
+ data['code_cache_committed'] = pool.get('usageCommitted')
data['code_cache_max'] = pool.get('usageMax')
elif name == 'Compressed':
data['compressed_used'] = pool.get('usageUsed')
- data['compressed_commited'] = pool.get('usageCommitted')
+ data['compressed_committed'] = pool.get('usageCommitted')
data['compressed_max'] = pool.get('usageMax')
elif name == 'Metaspace':
data['metaspace_used'] = pool.get('usageUsed')
- data['metaspace_commited'] = pool.get('usageCommitted')
+ data['metaspace_committed'] = pool.get('usageCommitted')
data['metaspace_max'] = pool.get('usageMax')
if connector:
@@ -145,9 +155,9 @@ class Service(UrlService):
request_info = connector.find('requestInfo')
data['processingTime'] = request_info.get('processingTime')
- data['requestCount'] = request_info.get('requestCount')
- data['errorCount'] = request_info.get('errorCount')
- data['bytesReceived'] = request_info.get('bytesReceived')
- data['bytesSent'] = request_info.get('bytesSent')
+ data['requestCount'] = request_info.get('requestCount')
+ data['errorCount'] = request_info.get('errorCount')
+ data['bytesReceived'] = request_info.get('bytesReceived')
+ data['bytesSent'] = request_info.get('bytesSent')
return data or None
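
Note: the tomcat hunks fix the 'commited' misspelling on both sides at once,
in the chart dimension ids and in the keys filled from the manager status
XML; renaming only one side would have left the committed-memory dimensions
permanently empty. Minimal sketch of the pool parsing against a made-up XML
snippet (the usageUsed/usageCommitted/usageMax attribute names come from the
hunk itself; the element layout is illustrative):

    import xml.etree.ElementTree as ET

    STATUS = ('<status><jvm>'
              '<memorypool name="Eden Space" usageUsed="1" usageCommitted="2" usageMax="4"/>'
              '</jvm></status>')

    data = {}
    for pool in ET.fromstring(STATUS).find('jvm').findall('memorypool'):
        if 'Eden Space' in pool.get('name'):
            data['eden_used'] = pool.get('usageUsed')
            data['eden_committed'] = pool.get('usageCommitted')
            data['eden_max'] = pool.get('usageMax')
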
diff --git a/python.d/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
index f7c3e223b..dc8933220 100644
--- a/python.d/traefik.chart.py
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: traefik netdata python.d module
# Author: Alexandre Menezes (@ale_menezes)
+# SPDX-License-Identifier: GPL-3.0-or-later
from json import loads
from collections import defaultdict
@@ -32,7 +33,8 @@ CHARTS = {
['redirects', 'redirect', 'incremental'],
['bad_requests', 'bad', 'incremental'],
['other_requests', 'other', 'incremental']
- ]},
+ ]
+ },
'response_codes': {
'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'],
'lines': [
@@ -42,37 +44,45 @@ CHARTS = {
['4xx', None, 'incremental'],
['1xx', None, 'incremental'],
['other', None, 'incremental']
- ]},
+ ]
+ },
'detailed_response_codes': {
- 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes', 'stacked'],
- 'lines': [
- ]},
+ 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes',
+ 'stacked'],
+ 'lines': []
+ },
'requests': {
'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'],
'lines': [
['total_count', 'requests', 'incremental']
- ]},
+ ]
+ },
'total_response_time': {
'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'],
'lines': [
['total_response_time_sec', 'response', 'absolute', 1, 10000]
- ]},
+ ]
+ },
'average_response_time': {
'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'],
'lines': [
['average_response_time_sec', 'response', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'average_response_time_per_iteration': {
- 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings', 'traefik.average_response_time_per_iteration', 'line'],
+ 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings',
+ 'traefik.average_response_time_per_iteration', 'line'],
'lines': [
['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000]
- ]},
+ ]
+ },
'uptime': {
'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'],
'lines': [
['uptime_sec', 'uptime', 'absolute']
- ]}
+ ]
}
+}
HEALTH_STATS = [
'uptime_sec',
@@ -82,6 +92,7 @@ HEALTH_STATS = [
'total_status_code_count'
]
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
@@ -116,9 +127,11 @@ class Service(UrlService):
self.data['average_response_time_sec'] *= 1000000
self.data['total_response_time_sec'] *= 10000
if data['total_count'] != self.last_total_count:
- self.data['average_response_time_per_iteration_sec'] = (data['total_response_time_sec'] - self.last_total_response_time) * 1000000 / (data['total_count'] - self.last_total_count)
+ self.data['average_response_time_per_iteration_sec'] = \
+ (data['total_response_time_sec'] - self.last_total_response_time) * \
+ 1000000 / (data['total_count'] - self.last_total_count)
else:
- self.data['average_response_time_per_iteration_sec'] = 0
+ self.data['average_response_time_per_iteration_sec'] = 0
self.last_total_response_time = data['total_response_time_sec']
self.last_total_count = data['total_count']
@@ -165,6 +178,7 @@ class Service(UrlService):
self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
self.data[code] = value
+
def fetch_data_(raw_data, metrics):
data = dict()
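
Note: the reformatted traefik calculation is a per-poll average derived from
two monotonically growing totals; guarding on an unchanged request count
avoids a division by zero. The same logic with simplified names (scaling
factors omitted):

    def per_iteration_average(total_time, total_count, last_time, last_count):
        if total_count == last_count:
            return 0
        return (total_time - last_time) / float(total_count - last_count)
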
diff --git a/python.d/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
index d8145c0b6..d889c2b33 100644
--- a/python.d/varnish.chart.py
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: varnish netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
import re
@@ -12,13 +13,22 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
priority = 60000
retries = 60
-ORDER = ['session_connections', 'client_requests',
- 'all_time_hit_rate', 'current_poll_hit_rate', 'cached_objects_expired', 'cached_objects_nuked',
- 'threads_total', 'threads_statistics', 'threads_queue_len',
- 'backend_connections', 'backend_requests',
- 'esi_statistics',
- 'memory_usage',
- 'uptime']
+ORDER = [
+ 'session_connections',
+ 'client_requests',
+ 'all_time_hit_rate',
+ 'current_poll_hit_rate',
+ 'cached_objects_expired',
+ 'cached_objects_nuked',
+ 'threads_total',
+ 'threads_statistics',
+ 'threads_queue_len',
+ 'backend_connections',
+ 'backend_requests',
+ 'esi_statistics',
+ 'memory_usage',
+ 'uptime'
+]
CHARTS = {
'session_connections': {
@@ -213,8 +223,9 @@ class Service(ExecutableService):
data.update(dict((param, value) for _, param, value in server_stats))
- data['memory_allocated'] = data['s0.g_bytes']
- data['memory_free'] = data['s0.g_space']
+ # varnish 5 uses default.g_bytes and default.g_space
+ data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
+ data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
return data
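
Note: the varnish change works around a renamed counter: older releases
report the default malloc storage as s0.g_bytes/s0.g_space, while Varnish 5
(per the comment in the hunk) uses default.g_bytes/default.g_space. A
first-match helper expressing the same fallback:

    def first_counter(stats, *names):
        # return the first counter present in stats, else None
        for name in names:
            if name in stats:
                return stats[name]
        return None

    # usage: first_counter(data, 's0.g_bytes', 'default.g_bytes')

Unlike chained data.get(a) or data.get(b), this variant also returns a
counter whose current value happens to be 0.
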
diff --git a/python.d/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
index be9baba92..20e15f4cb 100644
--- a/python.d/web_log.chart.py
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-
# Description: web log netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
import bisect
import re
import os
-import sys
from collections import namedtuple, defaultdict
from copy import deepcopy
@@ -16,21 +16,54 @@ except ImportError:
from itertools import ifilter as filter
from itertools import ifilterfalse as filterfalse
+try:
+ from sys import maxint
+except ImportError:
+ from sys import maxsize as maxint
+
from bases.collection import read_last_line
from bases.FrameworkServices.LogService import LogService
ORDER_APACHE_CACHE = ['apache_cache']
-ORDER_WEB = ['response_statuses', 'response_codes', 'bandwidth',
- 'response_time', 'response_time_hist', 'response_time_upstream', 'response_time_upstream_hist',
- 'requests_per_url', 'requests_per_user_defined', 'http_method', 'http_version',
- 'requests_per_ipproto', 'clients', 'clients_all']
-
-ORDER_SQUID = ['squid_response_statuses', 'squid_response_codes', 'squid_detailed_response_codes',
- 'squid_method', 'squid_mime_type', 'squid_hier_code', 'squid_transport_methods',
- 'squid_transport_errors', 'squid_code', 'squid_handling_opts', 'squid_object_types',
- 'squid_cache_events', 'squid_bytes', 'squid_duration', 'squid_clients', 'squid_clients_all']
+ORDER_WEB = [
+ 'response_statuses',
+ 'response_codes',
+ 'bandwidth',
+ 'response_time',
+ 'response_time_hist',
+ 'response_time_upstream',
+ 'response_time_upstream_hist',
+ 'requests_per_url',
+ 'requests_per_user_defined',
+ 'http_method',
+ 'vhost',
+ 'port',
+ 'http_version',
+ 'requests_per_ipproto',
+ 'clients',
+ 'clients_all'
+]
+
+ORDER_SQUID = [
+ 'squid_response_statuses',
+ 'squid_response_codes',
+ 'squid_detailed_response_codes',
+ 'squid_method',
+ 'squid_mime_type',
+ 'squid_hier_code',
+ 'squid_transport_methods',
+ 'squid_transport_errors',
+ 'squid_code',
+ 'squid_handling_opts',
+ 'squid_object_types',
+ 'squid_cache_events',
+ 'squid_bytes',
+ 'squid_duration',
+ 'squid_clients',
+ 'squid_clients_all'
+]
CHARTS_WEB = {
'response_codes': {
@@ -43,24 +76,27 @@ CHARTS_WEB = {
['1xx', None, 'incremental'],
['0xx', 'other', 'incremental'],
['unmatched', None, 'incremental']
- ]},
+ ]
+ },
'bandwidth': {
'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
'lines': [
['resp_length', 'received', 'incremental', 8, 1000],
['bytes_sent', 'sent', 'incremental', -8, 1000]
- ]},
+ ]
+ },
'response_time': {
'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
'lines': [
['resp_time_min', 'min', 'incremental', 1, 1000],
['resp_time_max', 'max', 'incremental', 1, 1000],
['resp_time_avg', 'avg', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'response_time_hist': {
'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'response_time_upstream': {
'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
'web_log.response_time_upstream', 'area'],
@@ -68,62 +104,80 @@ CHARTS_WEB = {
['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'response_time_upstream_hist': {
'options': [None, 'Processing Time Histogram', 'requests/s', 'timings',
'web_log.response_time_upstream_hist', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'clients': {
'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
'lines': [
['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1],
['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1]
- ]},
+ ]
+ },
'clients_all': {
'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'],
'lines': [
['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1],
['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1]
- ]},
+ ]
+ },
'http_method': {
'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'],
'lines': [
['GET', 'GET', 'incremental', 1, 1]
- ]},
+ ]
+ },
'http_version': {
'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions',
'web_log.http_version', 'stacked'],
- 'lines': []},
+ 'lines': []
+ },
'requests_per_ipproto': {
'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto',
'stacked'],
'lines': [
['req_ipv4', 'ipv4', 'incremental', 1, 1],
['req_ipv6', 'ipv6', 'incremental', 1, 1]
- ]},
+ ]
+ },
'response_statuses': {
- 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses',
- 'stacked'],
+ 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses', 'stacked'],
'lines': [
['successful_requests', 'success', 'incremental', 1, 1],
['server_errors', 'error', 'incremental', 1, 1],
['redirects', 'redirect', 'incremental', 1, 1],
['bad_requests', 'bad', 'incremental', 1, 1],
['other_requests', 'other', 'incremental', 1, 1]
- ]},
+ ]
+ },
'requests_per_url': {
- 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url',
- 'stacked'],
+ 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url', 'stacked'],
'lines': [
['url_pattern_other', 'other', 'incremental', 1, 1]
- ]},
+ ]
+ },
'requests_per_user_defined': {
'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
'web_log.requests_per_user_defined', 'stacked'],
'lines': [
['user_pattern_other', 'other', 'incremental', 1, 1]
- ]}
+ ]
+ },
+ 'port': {
+ 'options': [None, 'Requests Per Port', 'requests/s', 'port', 'web_log.port', 'stacked'],
+ 'lines': [
+ ['port_80', 'http', 'incremental', 1, 1],
+ ['port_443', 'https', 'incremental', 1, 1]
+ ]
+ },
+ 'vhost': {
+ 'options': [None, 'Requests Per Vhost', 'requests/s', 'vhost', 'web_log.vhost', 'stacked'],
+ 'lines': []
+ }
}
CHARTS_APACHE_CACHE = {
@@ -131,10 +185,11 @@ CHARTS_APACHE_CACHE = {
'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache',
'stacked'],
'lines': [
- ["hit", 'cache', "percentage-of-absolute-row"],
- ["miss", None, "percentage-of-absolute-row"],
- ["other", None, "percentage-of-absolute-row"]
- ]}
+ ['hit', 'cache', 'percentage-of-absolute-row'],
+ ['miss', None, 'percentage-of-absolute-row'],
+ ['other', None, 'percentage-of-absolute-row']
+ ]
+ }
}
CHARTS_SQUID = {
@@ -145,13 +200,15 @@ CHARTS_SQUID = {
['duration_min', 'min', 'incremental', 1, 1000],
['duration_max', 'max', 'incremental', 1, 1000],
['duration_avg', 'avg', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'squid_bytes': {
'options': [None, 'Amount Of Data Delivered To The Clients',
'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
'lines': [
['bytes', 'sent', 'incremental', 8, 1000]
- ]},
+ ]
+ },
'squid_response_statuses': {
'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
'stacked'],
@@ -161,7 +218,8 @@ CHARTS_SQUID = {
['redirects', 'redirect', 'incremental', 1, 1],
['bad_requests', 'bad', 'incremental', 1, 1],
['other_requests', 'other', 'incremental', 1, 1]
- ]},
+ ]
+ },
'squid_response_codes': {
'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
'web_log.squid_response_codes', 'stacked'],
@@ -174,89 +232,113 @@ CHARTS_SQUID = {
['0xx', None, 'incremental'],
['other', None, 'incremental'],
['unmatched', None, 'incremental']
- ]},
+ ]
+ },
'squid_code': {
'options': [None, 'Responses Per Cache Result Of The Request',
'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_detailed_response_codes': {
'options': [None, 'Detailed Response Codes',
'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_hier_code': {
'options': [None, 'Responses Per Hierarchy Code',
'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_method': {
'options': [None, 'Requests Per Method',
'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_mime_type': {
'options': [None, 'Requests Per MIME Type',
'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_clients': {
'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
'web_log.squid_clients', 'stacked'],
'lines': [
['unique_ipv4', 'ipv4', 'incremental'],
['unique_ipv6', 'ipv6', 'incremental']
- ]},
+ ]
+ },
'squid_clients_all': {
'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
'web_log.squid_clients_all', 'stacked'],
'lines': [
['unique_tot_ipv4', 'ipv4', 'absolute'],
['unique_tot_ipv6', 'ipv6', 'absolute']
- ]},
+ ]
+ },
'squid_transport_methods': {
'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
'web_log.squid_transport_methods', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_transport_errors': {
'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
'web_log.squid_transport_errors', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_handling_opts': {
'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
'web_log.squid_handling_opts', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_object_types': {
'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
'web_log.squid_object_types', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_cache_events': {
'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
'web_log.squid_cache_events', 'stacked'],
- 'lines': [
- ]}
+ 'lines': []
+ }
}
NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
-SQUID_CODES = dict(TCP='squid_transport_methods', UDP='squid_transport_methods', NONE='squid_transport_methods',
- CLIENT='squid_handling_opts', IMS='squid_handling_opts', ASYNC='squid_handling_opts',
- SWAPFAIL='squid_handling_opts', REFRESH='squid_handling_opts', SHARED='squid_handling_opts',
- REPLY='squid_handling_opts', NEGATIVE='squid_object_types', STALE='squid_object_types',
- OFFLINE='squid_object_types', INVALID='squid_object_types', FAIL='squid_object_types',
- MODIFIED='squid_object_types', UNMODIFIED='squid_object_types', REDIRECT='squid_object_types',
- HIT='squid_cache_events', MEM='squid_cache_events', MISS='squid_cache_events',
- DENIED='squid_cache_events', NOFETCH='squid_cache_events', TUNNEL='squid_cache_events',
- ABORTED='squid_transport_errors', TIMEOUT='squid_transport_errors')
+SQUID_CODES = {
+ 'TCP': 'squid_transport_methods',
+ 'UDP': 'squid_transport_methods',
+ 'NONE': 'squid_transport_methods',
+ 'CLIENT': 'squid_handling_opts',
+ 'IMS': 'squid_handling_opts',
+ 'ASYNC': 'squid_handling_opts',
+ 'SWAPFAIL': 'squid_handling_opts',
+ 'REFRESH': 'squid_handling_opts',
+ 'SHARED': 'squid_handling_opts',
+ 'REPLY': 'squid_handling_opts',
+ 'NEGATIVE': 'squid_object_types',
+ 'STALE': 'squid_object_types',
+ 'OFFLINE': 'squid_object_types',
+ 'INVALID': 'squid_object_types',
+ 'FAIL': 'squid_object_types',
+ 'MODIFIED': 'squid_object_types',
+ 'UNMODIFIED': 'squid_object_types',
+ 'REDIRECT': 'squid_object_types',
+ 'HIT': 'squid_cache_events',
+ 'MEM': 'squid_cache_events',
+ 'MISS': 'squid_cache_events',
+ 'DENIED': 'squid_cache_events',
+ 'NOFETCH': 'squid_cache_events',
+ 'TUNNEL': 'squid_cache_events',
+ 'ABORTED': 'squid_transport_errors',
+ 'TIMEOUT': 'squid_transport_errors'
+}
REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:.\d)?)')
+MIME_TYPES = ['application', 'audio', 'example', 'font', 'image', 'message', 'model', 'multipart', 'text', 'video']
+
class Service(LogService):
def __init__(self, configuration=None, name=None):
@@ -283,7 +365,7 @@ class Service(LogService):
log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
if log_type not in log_types:
- self.error("bad log type {log_type}. Supported types: {types}".format(log_type=log_type,
+ self.error('bad log type {log_type}. Supported types: {types}'.format(log_type=log_type,
types=log_types.keys()))
return False
@@ -317,12 +399,35 @@ class Web:
self.definitions = deepcopy(CHARTS_WEB)
self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
self.storage = dict()
- self.data = {'bytes_sent': 0, 'resp_length': 0, 'resp_time_min': 0, 'resp_time_max': 0,
- 'resp_time_avg': 0, 'resp_time_upstream_min': 0, 'resp_time_upstream_max': 0,
- 'resp_time_upstream_avg': 0, 'unique_cur_ipv4': 0, 'unique_cur_ipv6': 0, '2xx': 0,
- '5xx': 0, '3xx': 0, '4xx': 0, '1xx': 0, '0xx': 0, 'unmatched': 0, 'req_ipv4': 0,
- 'req_ipv6': 0, 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
- 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0, 'GET': 0}
+ self.data = {
+ 'bytes_sent': 0,
+ 'resp_length': 0,
+ 'resp_time_min': 0,
+ 'resp_time_max': 0,
+ 'resp_time_avg': 0,
+ 'resp_time_upstream_min': 0,
+ 'resp_time_upstream_max': 0,
+ 'resp_time_upstream_avg': 0,
+ 'unique_cur_ipv4': 0,
+ 'unique_cur_ipv6': 0,
+ '2xx': 0,
+ '5xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '1xx': 0,
+ '0xx': 0,
+ 'unmatched': 0,
+ 'req_ipv4': 0,
+ 'req_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ 'GET': 0
+ }
def __getattr__(self, item):
return getattr(self.service, item)
@@ -367,21 +472,21 @@ class Web:
histogram = self.configuration.get('histogram', None)
if isinstance(histogram, list):
self.storage['bucket_index'] = histogram[:]
- self.storage['bucket_index'].append(sys.maxint)
+ self.storage['bucket_index'].append(maxint)
self.storage['buckets'] = [0] * (len(histogram) + 1)
self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
hist_lines = self.definitions['response_time_hist']['lines']
upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
for i, le in enumerate(histogram):
- hist_key = "response_time_hist_%d" % i
- upstream_hist_key = "response_time_upstream_hist_%d" % i
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
- hist_lines.append(["response_time_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1])
- upstream_hist_lines.append(["response_time_upstream_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1])
+ hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+ upstream_hist_lines.append(['response_time_upstream_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
elif histogram is not None:
- self.error("expect histogram list, but was {0}".format(type(histogram)))
+ self.error('expect histogram list, but was {0}'.format(type(histogram)))
if not self.configuration.get('all_time', True):
self.order.remove('clients_all')
@@ -395,10 +500,11 @@ class Web:
for code in codes:
self.order.append('detailed_response_codes%s' % code)
- self.definitions['detailed_response_codes%s' % code] \
- = {'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
- 'web_log.detailed_response_codes%s' % code, 'stacked'],
- 'lines': []}
+ self.definitions['detailed_response_codes%s' % code] = {
+ 'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
+ 'web_log.detailed_response_codes%s' % code, 'stacked'],
+ 'lines': []
+ }
# Add 'requests_per_url' chart if specified in the configuration
if self.storage['url_pattern']:
@@ -499,8 +605,8 @@ class Web:
buckets = self.storage['buckets']
upstream_buckets = self.storage['upstream_buckets']
for i in range(0, len(self.storage['bucket_index'])):
- hist_key = "response_time_hist_%d" % i
- upstream_hist_key = "response_time_upstream_hist_%d" % i
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
self.data[hist_key] = buckets[i]
self.data[upstream_hist_key] = upstream_buckets[i]
@@ -596,7 +702,7 @@ class Web:
We are here only if "custom_log_format" is in logs. We need to make sure:
1. "custom_log_format" is a dict
2. "pattern" in "custom_log_format" and pattern is <str> instance
- 3. if "time_multiplier" is in "custom_log_format" it must be <int> instance
+ 3. if "time_multiplier" is in "custom_log_format" it must be <int> or <float> instance
If all parameters is ok we need to make sure:
1. Pattern search is success
@@ -623,8 +729,8 @@ class Web:
resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
- if not isinstance(resp_time_func, int):
- return find_regex_return(msg='Custom log: "time_multiplier" is not an integer')
+ if not isinstance(resp_time_func, (int, float)):
+ return find_regex_return(msg='Custom log: "time_multiplier" is not an integer or a float')
try:
regex = re.compile(pattern)
@@ -701,6 +807,23 @@ class Web:
'incremental'])
self.data[dim_id] = 0
self.data[dim_id] += 1
+ # requests per port number
+ if match_dict.get('port'):
+ if match_dict['port'] not in self.data:
+ self.charts['port'].add_dimension([match_dict['port'],
+ match_dict['port'],
+ 'incremental'])
+ self.data[match_dict['port']] = 0
+ self.data[match_dict['port']] += 1
+ # requests per vhost
+ if match_dict.get('vhost'):
+ dim_id = match_dict['vhost'].replace('.', '_')
+ if dim_id not in self.data:
+ self.charts['vhost'].add_dimension([dim_id,
+ match_dict['vhost'],
+ 'incremental'])
+ self.data[dim_id] = 0
+ self.data[dim_id] += 1
def get_data_per_response_codes_detailed(self, code):
"""
@@ -788,12 +911,29 @@ class Squid:
self.definitions = CHARTS_SQUID
self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
self.storage = dict()
- self.data = {'duration_max': 0, 'duration_avg': 0, 'duration_min': 0, 'bytes': 0,
- '0xx': 0, '1xx': 0, '2xx': 0, '3xx': 0, '4xx': 0, '5xx': 0,
- 'other': 0, 'unmatched': 0, 'unique_ipv4': 0, 'unique_ipv6': 0,
- 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
- 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0
- }
+ self.data = {
+ 'duration_max': 0,
+ 'duration_avg': 0,
+ 'duration_min': 0,
+ 'bytes': 0,
+ '0xx': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'unmatched': 0,
+ 'unique_ipv4': 0,
+ 'unique_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0
+ }
def __getattr__(self, item):
return getattr(self.service, item)
@@ -811,30 +951,35 @@ class Squid:
r' (?P<method>[A-Z_]+)'
r' (?P<url>[^ ]+)'
r' (?P<user>[^ ]+)'
- r' (?P<hier_code>[A-Z_]+)/[\da-f.:-]+'
- r' (?P<mime_type>[^\n]+)')
+ r' (?P<hier_code>[A-Z_]+)/[\da-z.:-]+'
+ r' (?P<mime_type>[A-Za-z-]*)')
match = self.storage['regex'].search(last_line)
if not match:
self.error('Regex not matches (%s)' % self.storage['regex'].pattern)
return False
self.storage['dynamic'] = {
- 'http_code':
- {'chart': 'squid_detailed_response_codes',
+ 'http_code': {
+ 'chart': 'squid_detailed_response_codes',
'func_dim_id': None,
- 'func_dim': None},
+ 'func_dim': None
+ },
'hier_code': {
'chart': 'squid_hier_code',
'func_dim_id': None,
- 'func_dim': lambda v: v.replace('HIER_', '')},
+ 'func_dim': lambda v: v.replace('HIER_', '')
+ },
'method': {
'chart': 'squid_method',
'func_dim_id': None,
- 'func_dim': None},
+ 'func_dim': None
+ },
'mime_type': {
'chart': 'squid_mime_type',
- 'func_dim_id': lambda v: v.split('/')[0],
- 'func_dim': None}}
+ 'func_dim_id': lambda v: str.lower(v) if str.lower(v) in MIME_TYPES else 'unknown',
+ 'func_dim': None
+ }
+ }
if not self.configuration.get('all_time', True):
self.order.remove('squid_clients_all')
return True
@@ -951,6 +1096,7 @@ def get_timings(timings, time):
timings['summary'] += time
timings['count'] += 1
+
def get_hist(index, buckets, time):
"""
:param index: histogram index (Ex. [10, 50, 100, 150, ...])
@@ -964,6 +1110,7 @@ def get_hist(index, buckets, time):
else:
break
+
def address_not_in_pool(pool, address, pool_size):
"""
:param pool: list of ip addresses
diff --git a/python.d/Makefile.in b/python.d/Makefile.in
deleted file mode 100644
index d6e11d0cf..000000000
--- a/python.d/Makefile.in
+++ /dev/null
@@ -1,1107 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
-
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-
-VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
-am__make_running_with_option = \
- case $${target_option-} in \
- ?) ;; \
- *) echo "am__make_running_with_option: internal error: invalid" \
- "target option '$${target_option-}' specified" >&2; \
- exit 1;; \
- esac; \
- has_opt=no; \
- sane_makeflags=$$MAKEFLAGS; \
- if $(am__is_gnu_make); then \
- sane_makeflags=$$MFLAGS; \
- else \
- case $$MAKEFLAGS in \
- *\\[\ \ ]*) \
- bs=\\; \
- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
- esac; \
- fi; \
- skip_next=no; \
- strip_trailopt () \
- { \
- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
- }; \
- for flg in $$sane_makeflags; do \
- test $$skip_next = yes && { skip_next=no; continue; }; \
- case $$flg in \
- *=*|--*) continue;; \
- -*I) strip_trailopt 'I'; skip_next=yes;; \
- -*I?*) strip_trailopt 'I';; \
- -*O) strip_trailopt 'O'; skip_next=yes;; \
- -*O?*) strip_trailopt 'O';; \
- -*l) strip_trailopt 'l'; skip_next=yes;; \
- -*l?*) strip_trailopt 'l';; \
- -[dEDm]) skip_next=yes;; \
- -[JT]) skip_next=yes;; \
- esac; \
- case $$flg in \
- *$$target_option*) has_opt=yes; break;; \
- esac; \
- done; \
- test $$has_opt = yes
-am__make_dryrun = (target_option=n; $(am__make_running_with_option))
-am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(top_srcdir)/build/subst.inc $(srcdir)/Makefile.in \
- $(srcdir)/Makefile.am $(dist_python_SCRIPTS) \
- $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_python_DATA) $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonmodules_DATA) \
- $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA) \
- $(dist_third_party_DATA)
-subdir = python.d
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/m4/ax_c___atomic.m4 \
- $(top_srcdir)/m4/ax_c__generic.m4 $(top_srcdir)/m4/ax_c_lto.m4 \
- $(top_srcdir)/m4/ax_c_mallinfo.m4 \
- $(top_srcdir)/m4/ax_c_mallopt.m4 \
- $(top_srcdir)/m4/ax_check_compile_flag.m4 \
- $(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
- $(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/jemalloc.m4 \
- $(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
- srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
- for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
- for p in $$list; do echo "$$p $$p"; done | \
- sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
- $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
- if (++n[$$2] == $(am__install_max)) \
- { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
- END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
- sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
- sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__uninstall_files_from_dir = { \
- test -z "$$files" \
- || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
- || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
- $(am__cd) "$$dir" && rm -f $$files; }; \
- }
-am__installdirs = "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" \
- "$(DESTDIR)$(bases_framework_servicesdir)" \
- "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" \
- "$(DESTDIR)$(python_urllib3_backportsdir)" \
- "$(DESTDIR)$(python_urllib3_contribdir)" \
- "$(DESTDIR)$(python_urllib3_packagesdir)" \
- "$(DESTDIR)$(python_urllib3_securetransportdir)" \
- "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
- "$(DESTDIR)$(python_urllib3_utildir)" \
- "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" \
- "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)"
-SCRIPTS = $(dist_python_SCRIPTS)
-AM_V_P = $(am__v_P_@AM_V@)
-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
-am__v_P_0 = false
-am__v_P_1 = :
-AM_V_GEN = $(am__v_GEN_@AM_V@)
-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
-am__v_GEN_0 = @echo " GEN " $@;
-am__v_GEN_1 =
-AM_V_at = $(am__v_at_@AM_V@)
-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
-am__v_at_0 = @
-am__v_at_1 =
-SOURCES =
-DIST_SOURCES =
-am__can_run_installinfo = \
- case $$AM_UPDATE_INFO_DIR in \
- n|no|NO) false;; \
- *) (install-info --version) >/dev/null 2>&1;; \
- esac
-DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
- $(dist_python_DATA) $(dist_python_urllib3_DATA) \
- $(dist_python_urllib3_backports_DATA) \
- $(dist_python_urllib3_contrib_DATA) \
- $(dist_python_urllib3_packages_DATA) \
- $(dist_python_urllib3_securetransport_DATA) \
- $(dist_python_urllib3_ssl_match_hostname_DATA) \
- $(dist_python_urllib3_util_DATA) $(dist_pythonmodules_DATA) \
- $(dist_pythonyaml2_DATA) $(dist_pythonyaml3_DATA) \
- $(dist_third_party_DATA)
-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
-IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
-LDFLAGS = @LDFLAGS@
-LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
-LIBCAP_LIBS = @LIBCAP_LIBS@
-LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
-LIBMNL_LIBS = @LIBMNL_LIBS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MATH_CFLAGS = @MATH_CFLAGS@
-MATH_LIBS = @MATH_LIBS@
-MKDIR_P = @MKDIR_P@
-NFACCT_CFLAGS = @NFACCT_CFLAGS@
-NFACCT_LIBS = @NFACCT_LIBS@
-OBJEXT = @OBJEXT@
-OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
-OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
-OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
-OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
-OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
-OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
-OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
-OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
-OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
-OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
-OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
-OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
-PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PKG_CONFIG = @PKG_CONFIG@
-PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
-PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
-PTHREAD_CC = @PTHREAD_CC@
-PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
-PTHREAD_LIBS = @PTHREAD_LIBS@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SSE_CANDIDATE = @SSE_CANDIDATE@
-STRIP = @STRIP@
-UUID_CFLAGS = @UUID_CFLAGS@
-UUID_LIBS = @UUID_LIBS@
-VERSION = @VERSION@
-ZLIB_CFLAGS = @ZLIB_CFLAGS@
-ZLIB_LIBS = @ZLIB_LIBS@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-ax_pthread_config = @ax_pthread_config@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-builddir = @builddir@
-cachedir = @cachedir@
-chartsdir = @chartsdir@
-configdir = @configdir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-has_jemalloc = @has_jemalloc@
-has_tcmalloc = @has_tcmalloc@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-logdir = @logdir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-nodedir = @nodedir@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-pluginsdir = @pluginsdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-pythondir = @pythondir@
-registrydir = @registrydir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-varlibdir = @varlibdir@
-webdir = @webdir@
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- python-modules-installer.sh \
- $(NULL)
-
-SUFFIXES = .in
-dist_python_SCRIPTS = \
- python-modules-installer.sh \
- $(NULL)
-
-dist_python_DATA = \
- README.md \
- apache.chart.py \
- beanstalk.chart.py \
- bind_rndc.chart.py \
- ceph.chart.py \
- chrony.chart.py \
- couchdb.chart.py \
- cpufreq.chart.py \
- cpuidle.chart.py \
- dns_query_time.chart.py \
- dnsdist.chart.py \
- dovecot.chart.py \
- elasticsearch.chart.py \
- example.chart.py \
- exim.chart.py \
- fail2ban.chart.py \
- freeradius.chart.py \
- go_expvar.chart.py \
- haproxy.chart.py \
- hddtemp.chart.py \
- httpcheck.chart.py \
- icecast.chart.py \
- ipfs.chart.py \
- isc_dhcpd.chart.py \
- mdstat.chart.py \
- memcached.chart.py \
- mongodb.chart.py \
- mysql.chart.py \
- nginx.chart.py \
- nginx_plus.chart.py \
- nsd.chart.py \
- ntpd.chart.py \
- ovpn_status_log.chart.py \
- phpfpm.chart.py \
- portcheck.chart.py \
- postfix.chart.py \
- postgres.chart.py \
- powerdns.chart.py \
- rabbitmq.chart.py \
- redis.chart.py \
- retroshare.chart.py \
- samba.chart.py \
- sensors.chart.py \
- springboot.chart.py \
- squid.chart.py \
- smartd_log.chart.py \
- tomcat.chart.py \
- traefik.chart.py \
- varnish.chart.py \
- web_log.chart.py \
- $(NULL)
-
-pythonmodulesdir = $(pythondir)/python_modules
-dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- python_modules/base.py \
- $(NULL)
-
-basesdir = $(pythonmodulesdir)/bases
-dist_bases_DATA = \
- python_modules/bases/__init__.py \
- python_modules/bases/charts.py \
- python_modules/bases/collection.py \
- python_modules/bases/loaders.py \
- python_modules/bases/loggers.py \
- $(NULL)
-
-bases_framework_servicesdir = $(basesdir)/FrameworkServices
-dist_bases_framework_services_DATA = \
- python_modules/bases/FrameworkServices/__init__.py \
- python_modules/bases/FrameworkServices/ExecutableService.py \
- python_modules/bases/FrameworkServices/LogService.py \
- python_modules/bases/FrameworkServices/MySQLService.py \
- python_modules/bases/FrameworkServices/SimpleService.py \
- python_modules/bases/FrameworkServices/SocketService.py \
- python_modules/bases/FrameworkServices/UrlService.py \
- $(NULL)
-
-third_partydir = $(pythonmodulesdir)/third_party
-dist_third_party_DATA = \
- python_modules/third_party/__init__.py \
- python_modules/third_party/ordereddict.py \
- python_modules/third_party/lm_sensors.py \
- $(NULL)
-
-pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
-dist_pythonyaml2_DATA = \
- python_modules/pyyaml2/__init__.py \
- python_modules/pyyaml2/composer.py \
- python_modules/pyyaml2/constructor.py \
- python_modules/pyyaml2/cyaml.py \
- python_modules/pyyaml2/dumper.py \
- python_modules/pyyaml2/emitter.py \
- python_modules/pyyaml2/error.py \
- python_modules/pyyaml2/events.py \
- python_modules/pyyaml2/loader.py \
- python_modules/pyyaml2/nodes.py \
- python_modules/pyyaml2/parser.py \
- python_modules/pyyaml2/reader.py \
- python_modules/pyyaml2/representer.py \
- python_modules/pyyaml2/resolver.py \
- python_modules/pyyaml2/scanner.py \
- python_modules/pyyaml2/serializer.py \
- python_modules/pyyaml2/tokens.py \
- $(NULL)
-
-pythonyaml3dir = $(pythonmodulesdir)/pyyaml3
-dist_pythonyaml3_DATA = \
- python_modules/pyyaml3/__init__.py \
- python_modules/pyyaml3/composer.py \
- python_modules/pyyaml3/constructor.py \
- python_modules/pyyaml3/cyaml.py \
- python_modules/pyyaml3/dumper.py \
- python_modules/pyyaml3/emitter.py \
- python_modules/pyyaml3/error.py \
- python_modules/pyyaml3/events.py \
- python_modules/pyyaml3/loader.py \
- python_modules/pyyaml3/nodes.py \
- python_modules/pyyaml3/parser.py \
- python_modules/pyyaml3/reader.py \
- python_modules/pyyaml3/representer.py \
- python_modules/pyyaml3/resolver.py \
- python_modules/pyyaml3/scanner.py \
- python_modules/pyyaml3/serializer.py \
- python_modules/pyyaml3/tokens.py \
- $(NULL)
-
-python_urllib3dir = $(pythonmodulesdir)/urllib3
-dist_python_urllib3_DATA = \
- python_modules/urllib3/__init__.py \
- python_modules/urllib3/_collections.py \
- python_modules/urllib3/connection.py \
- python_modules/urllib3/connectionpool.py \
- python_modules/urllib3/exceptions.py \
- python_modules/urllib3/fields.py \
- python_modules/urllib3/filepost.py \
- python_modules/urllib3/response.py \
- python_modules/urllib3/poolmanager.py \
- python_modules/urllib3/request.py \
- $(NULL)
-
-python_urllib3_utildir = $(python_urllib3dir)/util
-dist_python_urllib3_util_DATA = \
- python_modules/urllib3/util/__init__.py \
- python_modules/urllib3/util/connection.py \
- python_modules/urllib3/util/request.py \
- python_modules/urllib3/util/response.py \
- python_modules/urllib3/util/retry.py \
- python_modules/urllib3/util/selectors.py \
- python_modules/urllib3/util/ssl_.py \
- python_modules/urllib3/util/timeout.py \
- python_modules/urllib3/util/url.py \
- python_modules/urllib3/util/wait.py \
- $(NULL)
-
-python_urllib3_packagesdir = $(python_urllib3dir)/packages
-dist_python_urllib3_packages_DATA = \
- python_modules/urllib3/packages/__init__.py \
- python_modules/urllib3/packages/ordered_dict.py \
- python_modules/urllib3/packages/six.py \
- $(NULL)
-
-python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
-dist_python_urllib3_backports_DATA = \
- python_modules/urllib3/packages/backports/__init__.py \
- python_modules/urllib3/packages/backports/makefile.py \
- $(NULL)
-
-python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
-dist_python_urllib3_ssl_match_hostname_DATA = \
- python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
- python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
- $(NULL)
-
-python_urllib3_contribdir = $(python_urllib3dir)/contrib
-dist_python_urllib3_contrib_DATA = \
- python_modules/urllib3/contrib/__init__.py \
- python_modules/urllib3/contrib/appengine.py \
- python_modules/urllib3/contrib/ntlmpool.py \
- python_modules/urllib3/contrib/pyopenssl.py \
- python_modules/urllib3/contrib/securetransport.py \
- python_modules/urllib3/contrib/socks.py \
- $(NULL)
-
-python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
-dist_python_urllib3_securetransport_DATA = \
- python_modules/urllib3/contrib/_securetransport/__init__.py \
- python_modules/urllib3/contrib/_securetransport/bindings.py \
- python_modules/urllib3/contrib/_securetransport/low_level.py \
- $(NULL)
-
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .in
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu python.d/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --gnu python.d/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-$(top_srcdir)/build/subst.inc:
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
- done | \
- sed -e 'p;s,.*/,,;n' \
- -e 'h;s|.*|.|' \
- -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
- if ($$2 == $$4) { files[d] = files[d] " " $$1; \
- if (++n[d] == $(am__install_max)) { \
- print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
- else { print "f", d "/" $$4, $$1 } } \
- END { for (d in files) print "f", d, files[d] }' | \
- while read type dir files; do \
- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
- test -z "$$files" || { \
- echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \
- $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \
- } \
- ; done
-
-uninstall-dist_pythonSCRIPTS:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \
- files=`for p in $$list; do echo "$$p"; done | \
- sed -e 's,.*/,,;$(transform)'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_basesDATA: $(dist_bases_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \
- done
-
-uninstall-dist_basesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir)
-install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \
- done
-
-uninstall-dist_bases_framework_servicesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonDATA: $(dist_python_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \
- done
-
-uninstall-dist_pythonDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_backportsDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_contribDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_packagesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_securetransportDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
-install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
- done
-
-uninstall-dist_python_urllib3_utilDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \
- done
-
-uninstall-dist_pythonmodulesDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml2DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
-install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \
- done
-
-uninstall-dist_pythonyaml3DATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
-install-dist_third_partyDATA: $(dist_third_party_DATA)
- @$(NORMAL_INSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- if test -n "$$list"; then \
- echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \
- $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \
- fi; \
- for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- echo "$$d$$p"; \
- done | $(am__base_list) | \
- while read files; do \
- echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \
- $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \
- done
-
-uninstall-dist_third_partyDATA:
- @$(NORMAL_UNINSTALL)
- @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
- files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir)
-tags TAGS:
-
-ctags CTAGS:
-
-cscope cscopelist:
-
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
- for dir in "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- if test -z '$(STRIP)'; then \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- install; \
- else \
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
- fi
-mostlyclean-generic:
-
-clean-generic:
- -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
- -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
-clean: clean-am
-
-clean-am: clean-generic mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-generic
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am: install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \
- install-dist_pythonyaml3DATA install-dist_third_partyDATA
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA
-
-.MAKE: install-am install-strip
-
-.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
- ctags-am distclean distclean-generic distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dist_basesDATA \
- install-dist_bases_framework_servicesDATA \
- install-dist_pythonDATA install-dist_pythonSCRIPTS \
- install-dist_python_urllib3DATA \
- install-dist_python_urllib3_backportsDATA \
- install-dist_python_urllib3_contribDATA \
- install-dist_python_urllib3_packagesDATA \
- install-dist_python_urllib3_securetransportDATA \
- install-dist_python_urllib3_ssl_match_hostnameDATA \
- install-dist_python_urllib3_utilDATA \
- install-dist_pythonmodulesDATA install-dist_pythonyaml2DATA \
- install-dist_pythonyaml3DATA install-dist_third_partyDATA \
- install-dvi install-dvi-am install-exec install-exec-am \
- install-html install-html-am install-info install-info-am \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
- uninstall uninstall-am uninstall-dist_basesDATA \
- uninstall-dist_bases_framework_servicesDATA \
- uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
- uninstall-dist_python_urllib3DATA \
- uninstall-dist_python_urllib3_backportsDATA \
- uninstall-dist_python_urllib3_contribDATA \
- uninstall-dist_python_urllib3_packagesDATA \
- uninstall-dist_python_urllib3_securetransportDATA \
- uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
- uninstall-dist_python_urllib3_utilDATA \
- uninstall-dist_pythonmodulesDATA \
- uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
- uninstall-dist_third_partyDATA
-
-.in:
- if sed \
- -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
- -e 's#[@]sbindir_POST@#$(sbindir)#g' \
- -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
- -e 's#[@]pythondir_POST@#$(pythondir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/python.d/README.md b/python.d/README.md
deleted file mode 100644
index faabba2c7..000000000
--- a/python.d/README.md
+++ /dev/null
@@ -1,2363 +0,0 @@
-# Disclaimer
-
-Every module should be compatible with python2 and python3.
-All third party libraries should be installed system-wide or in `python_modules` directory.
-Module configurations are written in YAML and **pyYAML is required**.
-
-Every configuration file must have one of two formats:
-
-- Configuration for only one job:
-
-```yaml
-update_every : 2 # update frequency
-retries : 1 # how many failures in update() are tolerated
-priority : 20000 # where it is shown on dashboard
-
-other_var1 : bla # variables passed to module
-other_var2 : alb
-```
-
-- Configuration for many jobs (ex. mysql):
-
-```yaml
-# module defaults:
-update_every : 2
-retries : 1
-priority : 20000
-
-local: # job name
- update_every : 5 # job update frequency
- other_var1 : some_val # module specific variable
-
-other_job:
- priority : 5 # job position on dashboard
- retries : 20 # job retries
- other_var2 : val # module specific variable
-```
-
-`update_every`, `retries`, and `priority` are always optional.
-
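-A minimal sketch (assuming pyYAML is available; this is not the plugin's actual loader) of how the "many jobs" format above can be interpreted, with module defaults merged into every job:
-
-```python
-import yaml
-
-MODULE_KEYS = ('update_every', 'retries', 'priority')
-
-def parse_jobs(path):
-    with open(path) as f:
-        conf = yaml.safe_load(f) or {}
-    # module-level defaults live at the top level of the file
-    defaults = dict((k, conf.pop(k)) for k in MODULE_KEYS if k in conf)
-    jobs = {}
-    for name, job in conf.items():   # every remaining key is a job
-        merged = dict(defaults)
-        merged.update(job or {})     # per-job values override defaults
-        jobs[name] = merged
-    return jobs
-```
-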
----
-
-The following python.d modules are supported:
-
-# apache
-
-This module will monitor one or more Apache servers, depending on configuration.
-
-**Requirements:**
- * Apache with `mod_status` enabled
-
-It produces the following charts:
-
-1. **Requests** in requests/s
- * requests
-
-2. **Connections**
- * connections
-
-3. **Async Connections**
- * keepalive
- * closing
- * writing
-
-4. **Bandwidth** in kilobytes/s
- * sent
-
-5. **Workers**
- * idle
- * busy
-
-6. **Lifetime Avg. Requests/s** in requests/s
- * requests_sec
-
-7. **Lifetime Avg. Bandwidth/s** in kilobytes/s
- * size_sec
-
-8. **Lifetime Avg. Response Size** in bytes/request
- * size_req
-
-### configuration
-
-Needs only the `url` of the server's `server-status?auto` page.
-
-Here is an example for 2 servers:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
- url : 'http://localhost/server-status?auto'
- retries : 20
-
-remote:
- url : 'http://www.apache.org/server-status?auto'
- update_every : 5
- retries : 4
-```
-
-Without configuration, the module attempts to connect to `http://localhost/server-status?auto`
-
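-As a rough sketch of what happens with that URL (the field names are standard `?auto` output; the parsing itself is illustrative):
-
-```python
-try:
-    from urllib.request import urlopen   # Python 3
-except ImportError:
-    from urllib2 import urlopen          # Python 2
-
-def apache_status(url='http://localhost/server-status?auto'):
-    raw = urlopen(url).read().decode('utf-8')
-    data = {}
-    for line in raw.splitlines():
-        if ':' in line:
-            key, value = line.split(':', 1)
-            data[key.strip()] = value.strip()
-    return data   # e.g. data['Total Accesses'], data['BusyWorkers']
-```
-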
----
-
-# apache_cache
-
-Module monitors the Apache mod_cache log and produces only one chart:
-
-**cached responses** in percent cached
- * hit
- * miss
- * other
-
-### configuration
-
-Sample:
-
-```yaml
-update_every : 10
-priority : 120000
-retries : 5
-log_path : '/var/log/apache2/cache.log'
-```
-
-If no configuration is given, the module will attempt to read the log file at `/var/log/apache2/cache.log`
-
----
-
-# beanstalk
-
-Module provides server- and tube-level statistics:
-
-**Requirements:**
- * `python-beanstalkc`
- * `python-yaml`
-
-**Server statistics:**
-
-1. **CPU usage** in CPU time
- * user
- * system
-
-2. **Jobs rate** in jobs/s
- * total
- * timeouts
-
-3. **Connections rate** in connections/s
- * connections
-
-4. **Commands rate** in commands/s
- * put
- * peek
- * peek-ready
- * peek-delayed
- * peek-buried
- * reserve
- * use
- * watch
- * ignore
- * delete
- * release
- * bury
- * kick
- * stats
- * stats-job
- * stats-tube
- * list-tubes
- * list-tube-used
- * list-tubes-watched
- * pause-tube
-
-5. **Current tubes** in tubes
- * tubes
-
-6. **Current jobs** in jobs
- * urgent
- * ready
- * reserved
- * delayed
- * buried
-
-7. **Current connections** in connections
- * written
- * producers
- * workers
- * waiting
-
-8. **Binlog** in records/s
- * written
- * migrated
-
-9. **Uptime** in seconds
- * uptime
-
-**Per tube statistics:**
-
-1. **Jobs rate** in jobs/s
- * jobs
-
-2. **Jobs** in jobs
- * using
- * ready
- * reserved
- * delayed
- * buried
-
-3. **Connections** in connections
- * using
- * waiting
- * watching
-
-4. **Commands** in commands/s
- * deletes
- * pauses
-
-5. **Pause** in seconds
- * since
- * left
-
-
-### configuration
-
-Sample:
-
-```yaml
-host : '127.0.0.1'
-port : 11300
-```
-
-If no configuration is given, the module will attempt to connect to beanstalkd at `127.0.0.1:11300`
-
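-The `python-beanstalkc` requirement above is also the quickest way to see these numbers by hand; `Connection`, `stats()`, `tubes()` and `stats_tube()` are the library's real API:
-
-```python
-import beanstalkc
-
-conn = beanstalkc.Connection(host='127.0.0.1', port=11300)
-server_stats = conn.stats()            # e.g. 'total-jobs', 'current-tubes'
-for tube in conn.tubes():
-    tube_stats = conn.stats_tube(tube) # e.g. 'current-jobs-ready'
-conn.close()
-```
-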
----
-
-# bind_rndc
-
-Module parses the bind dump file to collect real-time performance metrics.
-
-**Requirements:**
- * bind version must be 9.6 or newer
- * Netdata must have permissions to run `rndc stats`
-
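-A sketch of the collection trigger implied by the requirements (the dump delimiter is what bind writes; the parsing here is deliberately minimal):
-
-```python
-import subprocess
-
-def latest_bind_dump(stats_path='/var/log/bind/named.stats'):
-    subprocess.check_call(['rndc', 'stats'])   # appends a fresh snapshot
-    with open(stats_path) as f:
-        return f.read().split('+++ Statistics Dump +++')[-1]
-```
-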
-It produces:
-
-1. **Name server statistics**
- * requests
- * responses
- * success
- * auth_answer
- * nonauth_answer
- * nxrrset
- * failure
- * nxdomain
- * recursion
- * duplicate
- * rejections
-
-2. **Incoming queries**
- * RESERVED0
- * A
- * NS
- * CNAME
- * SOA
- * PTR
- * MX
- * TXT
- * X25
- * AAAA
- * SRV
- * NAPTR
- * A6
- * DS
- * RRSIG
- * DNSKEY
- * SPF
- * ANY
- * DLV
-
-3. **Outgoing queries**
- * Same as Incoming queries
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- named_stats_path : '/var/log/bind/named.stats'
-```
-
-If no configuration is given, the module will attempt to read the named.stats file at `/var/log/bind/named.stats`
-
----
-
-# chrony
-
-This module monitors the precision and statistics of a local chronyd server.
-
-It produces:
-
-* frequency
-* last offset
-* RMS offset
-* residual freq
-* root delay
-* root dispersion
-* skew
-* system time
-
-**Requirements:**
-Verify that user netdata can execute `chronyc tracking`. If necessary, update the `cmdallow` directive in `/etc/chrony.conf`.
-
-### Configuration
-
-Sample:
-```yaml
-# data collection frequency:
-update_every: 1
-
-# chrony query command:
-local:
- command: 'chronyc -n tracking'
-```
-
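-Since `chronyc -n tracking` prints one "Name : value" pair per line, a hedged sketch of turning its output into a dict looks like this:
-
-```python
-import subprocess
-
-def chrony_tracking(command=('chronyc', '-n', 'tracking')):
-    out = subprocess.check_output(command).decode('utf-8')
-    stats = {}
-    for line in out.splitlines():
-        if ':' in line:
-            key, value = line.split(':', 1)
-            stats[key.strip()] = value.strip()
-    return stats   # e.g. stats['RMS offset']
-```
-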
----
-
-# ceph
-
-This module monitors the ceph cluster usage and consumption data of a server.
-
-It produces:
-
-* Cluster statistics (usage, available, latency, objects, read/write rate)
-* OSD usage
-* OSD latency
-* Pool usage
-* Pool read/write operations
-* Pool read/write rate
-* Number of objects per pool
-
-**Requirements:**
-
-- `rados` python module
-- Granting the ceph group read permission on the keyring file:
-```shell
-# chmod 640 /etc/ceph/ceph.client.admin.keyring
-```
-
-### Configuration
-
-Sample:
-```yaml
-local:
- config_file: '/etc/ceph/ceph.conf'
- keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-```
-
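-A minimal sketch with the `rados` bindings (`get_cluster_stats()` is part of the librados Python API; per-OSD and per-pool metrics need further `mon_command` calls not shown here):
-
-```python
-import rados
-
-cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
-                      conf=dict(keyring='/etc/ceph/ceph.client.admin.keyring'))
-cluster.connect()
-stats = cluster.get_cluster_stats()   # 'kb', 'kb_used', 'kb_avail', 'num_objects'
-print(stats['kb_used'], 'KB used of', stats['kb'])
-cluster.shutdown()
-```
-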
----
-
-# couchdb
-
-This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
-
-* Overall server reads/writes
-* HTTP traffic breakdown
- * Request methods (`GET`, `PUT`, `POST`, etc.)
- * Response status codes (`200`, `201`, `4xx`, etc.)
-* Active server tasks
-* Replication status (CouchDB 2.1 and up only)
-* Erlang VM stats
-* Optional per-database statistics: sizes, # of docs, # of deleted docs
-
-### Configuration
-
-Sample for a local server running on port 5984:
-```yaml
-local:
- user: 'admin'
- pass: 'password'
- node: 'couchdb@127.0.0.1'
-```
-
-Be sure to specify a correct admin-level username and password.
-
-You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
-
-If you want per-database statistics, these need to be added to the configuration, separated by spaces:
-```yaml
-local:
- ...
- databases: 'db1 db2 db3 ...'
-```
-
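-Under the hood this boils down to requests like the sketch below; `/_node/<node>/_stats` is CouchDB 2.x's real stats endpoint, the surrounding code is illustrative:
-
-```python
-import base64
-import json
-try:
-    from urllib.request import Request, urlopen
-except ImportError:
-    from urllib2 import Request, urlopen
-
-def couchdb_stats(node='couchdb@127.0.0.1', user='admin', password='password'):
-    req = Request('http://127.0.0.1:5984/_node/%s/_stats' % node)
-    auth = base64.b64encode(('%s:%s' % (user, password)).encode()).decode()
-    req.add_header('Authorization', 'Basic ' + auth)
-    return json.loads(urlopen(req).read().decode('utf-8'))
-```
-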
----
-
-# cpufreq
-
-This module shows the current CPU frequency as set by the cpufreq kernel
-module.
-
-**Requirement:**
-You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
-enabled in your kernel.
-
-This module tries to read from one of two possible locations. On
-initialization, it tries to read the `time_in_state` files provided by
-cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it
-falls back to using the less accurate `scaling_cur_freq` file (which only
-represents the **current** CPU frequency, and doesn't account for any state
-changes which happen between updates).
-
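-A sketch of the fallback read described above (standard sysfs paths; per-core handling simplified):
-
-```python
-import glob
-import os
-
-def current_freqs(sys_dir='/sys/devices'):
-    pattern = os.path.join(sys_dir, 'system/cpu/cpu[0-9]*/cpufreq/scaling_cur_freq')
-    freqs = {}
-    for path in glob.glob(pattern):
-        cpu = path.split('/')[-3]                 # e.g. 'cpu0'
-        with open(path) as f:
-            freqs[cpu] = int(f.read()) / 1000.0   # kHz -> MHz
-    return freqs
-```
-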
-It produces one chart with multiple lines (one line per core).
-
-### configuration
-
-Sample:
-
-```yaml
-sys_dir: "/sys/devices"
-```
-
-If no configuration is given, the module will search for cpufreq files in the `/sys/devices` directory.
-The directory is also prefixed with `NETDATA_HOST_PREFIX`, if specified.
-
----
-
-# cpuidle
-
-This module monitors the usage of CPU idle states.
-
-**Requirement:**
-Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
-
-It produces one stacked chart per CPU, showing the percentage of time spent in
-each state.
-
----
-# dns_query_time
-
-This module provides DNS query time statistics.
-
-**Requirement:**
-* `python-dnspython` package
-
-It produces one aggregate chart or one chart per DNS server, showing the query time.
-
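-The measurement itself can be sketched with dnspython (pre-2.0 `query()` API; the server and domain below are placeholders):
-
-```python
-import time
-import dns.resolver
-
-def query_time_ms(server='8.8.8.8', domain='example.com'):
-    resolver = dns.resolver.Resolver()
-    resolver.nameservers = [server]
-    start = time.time()
-    resolver.query(domain, 'A')
-    return (time.time() - start) * 1000
-```
-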
----
-
-# dnsdist
-
-Module monitors dnsdist performance and health metrics.
-
-The following charts are drawn:
-
-1. **Response latency**
- * latency-slow
- * latency100-1000
- * latency50-100
- * latency10-50
- * latency1-10
- * latency0-1
-
-2. **Cache performance**
- * cache-hits
- * cache-misses
-
-3. **ACL events**
- * acl-drops
- * rule-drop
- * rule-nxdomain
- * rule-refused
-
-4. **Noncompliant data**
- * empty-queries
- * no-policy
- * noncompliant-queries
- * noncompliant-responses
-
-5. **Queries**
- * queries
- * rdqueries
-
-6. **Health**
- * downstream-send-errors
- * downstream-timeouts
- * servfail-responses
- * trunc-failures
-
-### configuration
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://127.0.0.1:5053/jsonstat?command=stats'
- user : 'username'
- pass : 'password'
- header:
- X-API-Key: 'dnsdist-api-key'
-```
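-
-A request against that endpoint can be sketched like this; the `/jsonstat?command=stats` URL and `X-API-Key` header come from the configuration above, the rest is illustrative:
-
-```python
-import json
-try:
-    from urllib.request import Request, urlopen
-except ImportError:
-    from urllib2 import Request, urlopen
-
-req = Request('http://127.0.0.1:5053/jsonstat?command=stats')
-req.add_header('X-API-Key', 'dnsdist-api-key')
-stats = json.loads(urlopen(req).read().decode('utf-8'))
-print(stats.get('queries'), stats.get('cache-hits'))
-```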
-
----
-
-# dovecot
-
-This module provides statistics from the Dovecot server.
-Statistics are taken from the Dovecot socket by executing the `EXPORT global` command.
-More information about Dovecot stats can be found on the [project wiki page.](http://wiki2.dovecot.org/Statistics)
-
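-A hedged sketch of that exchange over the default unix socket (the framing of the old stats protocol is assumed: one tab-separated header line, one value line):
-
-```python
-import socket
-
-def dovecot_global_stats(path='/var/run/dovecot/stats'):
-    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-    sock.connect(path)
-    sock.sendall(b'EXPORT\tglobal\n')
-    raw = sock.recv(65536).decode('utf-8')   # single read, kept simple
-    sock.close()
-    header, values = raw.splitlines()[:2]
-    return dict(zip(header.split('\t'), values.split('\t')))
-```
-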
-**Requirement:**
-Dovecot unix socket with R/W permissions for user netdata, or Dovecot configured with a TCP/IP socket.
-
-Module gives information with following charts:
-
-1. **sessions**
- * active sessions
-
-2. **logins**
- * logins
-
-3. **commands** - number of IMAP commands
- * commands
-
-4. **Faults**
- * minor
- * major
-
-5. **Context Switches**
- * voluntary
- * involuntary
-
-6. **disk** in bytes/s
- * read
- * write
-
-7. **bytes** in bytes/s
- * read
- * write
-
-8. **number of syscalls** in syscalls/s
- * read
- * write
-
-9. **lookups** - number of lookups per second
- * path
- * attr
-
-10. **hits** - number of cache hits
- * hits
-
-11. **attempts** - authorization attempts
- * success
- * failure
-
-12. **cache** - cached authorization hits
- * hit
- * miss
-
-### configuration
-
-Sample:
-
-```yaml
-localtcpip:
- name : 'local'
- host : '127.0.0.1'
- port : 24242
-
-localsocket:
- name : 'local'
- socket : '/var/run/dovecot/stats'
-```
-
-If no configuration is given, the module will attempt to connect to Dovecot using the unix socket at `/var/run/dovecot/stats`
-
----
-
-# elasticsearch
-
-Module monitors elasticsearch performance and health metrics.
-
-It produces:
-
-1. **Search performance** charts:
- * Number of queries, fetches
- * Time spent on queries, fetches
- * Query and fetch latency
-
-2. **Indexing performance** charts:
- * Number of documents indexed, index refreshes, flushes
- * Time spent on indexing, refreshing, flushing
- * Indexing and flushing latency
-
-3. **Memory usage and garbage collection** charts:
- * JVM heap currently in use, committed
- * Count of garbage collections
- * Time spent on garbage collections
-
-4. **Host metrics** charts:
- * Available file descriptors in percent
- * Opened HTTP connections
- * Cluster communication transport metrics
-
-5. **Queues and rejections** charts:
- * Number of queued/rejected threads in thread pool
-
-6. **Fielddata cache** charts:
- * Fielddata cache size
- * Fielddata evictions and circuit breaker tripped count
-
-7. **Cluster health API** charts:
- * Cluster status
- * Nodes and tasks statistics
- * Shards statistics
-
-8. **Cluster stats API** charts:
- * Nodes statistics
- * Query cache statistics
- * Docs statistics
- * Store statistics
- * Indices and shards statistics
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- host : 'ipaddress' # Server ip address or hostname
- port : 9200 # Port on which elasticsearch listens
- cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default.
- cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default.
-```
-
-If no configuration is given, the module will fail to run.
-
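-The cluster health option above maps to a real elasticsearch endpoint; a sketch of the call it enables:
-
-```python
-import json
-try:
-    from urllib.request import urlopen
-except ImportError:
-    from urllib2 import urlopen
-
-def cluster_health(host='127.0.0.1', port=9200):
-    url = 'http://%s:%s/_cluster/health' % (host, port)
-    return json.loads(urlopen(url).read().decode('utf-8'))
-
-# cluster_health()['status'] -> 'green' / 'yellow' / 'red'
-```
-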
----
-
-# exim
-
-Simple module executing `exim -bpc` to grab the exim queue length.
-This command can take a long time to finish, so running it every second is not recommended.
-
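-The whole collection reduces to one command; `exim -bpc` prints the number of messages in the queue:
-
-```python
-import subprocess
-
-def exim_queue_length():
-    return int(subprocess.check_output(['exim', '-bpc']).strip())
-```
-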
-It produces only one chart:
-
-1. **Exim Queue Emails**
- * emails
-
-Configuration is not needed.
-
----
-
-# fail2ban
-
-Module monitors the fail2ban log file to show all bans for all active jails.
-
-**Requirements:**
- * fail2ban.log file MUST BE readable by netdata (a good idea is to add **create 0640 root netdata** to the fail2ban config in logrotate.d)
-
-It produces one chart with multiple lines (one line per jail)
-
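-Counting bans per jail can be sketched as below; the regex is an assumption about the common fail2ban log format, not the module's own pattern:
-
-```python
-import re
-from collections import defaultdict
-
-BAN_RE = re.compile(r'\[(?P<jail>[\w-]+)\]\s+Ban\s')
-
-def count_bans(path='/var/log/fail2ban.log'):
-    bans = defaultdict(int)
-    with open(path) as log:
-        for line in log:
-            match = BAN_RE.search(line)
-            if match:
-                bans[match.group('jail')] += 1
-    return bans
-```
-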
-### configuration
-
-Sample:
-
-```yaml
-local:
- log_path: '/var/log/fail2ban.log'
- conf_path: '/etc/fail2ban/jail.local'
- exclude: 'dropbear apache'
-```
-If no configuration is given, the module will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.
-If the conf file is not found, the default jail is `ssh`.
-
----
-
-# freeradius
-
-Uses the `radclient` command to provide freeradius statistics. It is not recommended to run it every second.
-
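-A hedged sketch of the underlying query (a Status-Server request fed to `radclient` on stdin; `FreeRADIUS-Statistics-Type = 1` asks for the authentication counters):
-
-```python
-import subprocess
-
-def radius_stats(host='localhost', port='18121', secret='adminsecret'):
-    query = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 1'
-    cmd = ['radclient', '%s:%s' % (host, port), 'status', secret]
-    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
-    out, _ = proc.communicate(query.encode())
-    return out.decode('utf-8')   # lines like "FreeRADIUS-Total-Access-Accepts = 0"
-```
-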
-It produces:
-
-1. **Authentication counters:**
- * access-accepts
- * access-rejects
- * auth-dropped-requests
- * auth-duplicate-requests
- * auth-invalid-requests
- * auth-malformed-requests
- * auth-unknown-types
-
-2. **Accounting counters:** [optional]
- * accounting-requests
- * accounting-responses
- * acct-dropped-requests
- * acct-duplicate-requests
- * acct-invalid-requests
- * acct-malformed-requests
- * acct-unknown-types
-
-3. **Proxy authentication counters:** [optional]
- * proxy-access-accepts
- * proxy-access-rejects
- * proxy-auth-dropped-requests
- * proxy-auth-duplicate-requests
- * proxy-auth-invalid-requests
- * proxy-auth-malformed-requests
- * proxy-auth-unknown-types
-
-4. **Proxy accounting counters:** [optional]
- * proxy-accounting-requests
- * proxy-accounting-responses
- * proxy-acct-dropped-requests
- * proxy-acct-duplicate-requests
- * proxy-acct-invalid-requests
- * proxy-acct-malformed-requests
- * proxy-acct-unknown-types
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- host : 'localhost'
- port : '18121'
- secret : 'adminsecret'
- acct : False # Freeradius accounting statistics.
- proxy_auth : False # Freeradius proxy authentication statistics.
- proxy_acct : False # Freeradius proxy accounting statistics.
-```
-
-**Freeradius server configuration:**
-
-The configuration for the status server is automatically created in the sites-available directory.
-By default, the status server is enabled and can be queried from every client.
-FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
-
-To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
- * `cd sites-enabled`
- * `ln -s ../sites-available/status status`
-
-and restart/reload your FreeRADIUS server.
-
----
-
-# go_expvar
-
-The `go_expvar` module can monitor any Go application that exposes its metrics with the use of `expvar` package from the Go standard library.
-
-`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts. Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications) for more info.
-
-For the memory statistics, it produces the following charts:
-
-1. **Heap allocations** in kB
- * alloc: size of objects allocated on the heap
- * inuse: size of allocated heap spans
-
-2. **Stack allocations** in kB
- * inuse: size of allocated stack spans
-
-3. **MSpan allocations** in kB
- * inuse: size of allocated mspan structures
-
-4. **MCache allocations** in kB
- * inuse: size of allocated mcache structures
-
-5. **Virtual memory** in kB
- * sys: size of reserved virtual address space
-
-6. **Live objects**
- * live: number of live objects in memory
-
-7. **GC pauses average** in ns
- * avg: average duration of all GC stop-the-world pauses
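-
-To show where such values come from, here is a hedged sketch that reads the `memstats` section from a hypothetical application's `/debug/vars` endpoint (the URL and port are illustrative):
-
-```python
-# Fetch the expvar JSON and pick two runtime.MemStats fields.
-import json
-try:
-    from urllib.request import urlopen  # Python 3
-except ImportError:
-    from urllib2 import urlopen  # Python 2
-
-data = json.load(urlopen('http://localhost:8080/debug/vars'))
-memstats = data['memstats']
-print('heap alloc kB:', memstats['Alloc'] / 1024.0)
-print('stack inuse kB:', memstats['StackInuse'] / 1024.0)
-```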
-
-### configuration
-
-Please see the [wiki page](https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#using-netdata-go_expvar-module) for detailed info about module configuration.
-
----
-
-# haproxy
-
-Module monitors frontend and backend metrics such as bytes in, bytes out, current sessions and sessions currently in queue,
-as well as health metrics such as backend server status (a server check should be used).
-
-The plugin can obtain data from a url **OR** a unix socket.
-
-**Requirement:**
-The socket MUST be readable AND writable by the netdata user.
-
-It produces:
-
-1. **Frontend** family charts
- * Kilobytes in/s
- * Kilobytes out/s
- * Sessions current
- * Sessions in queue current
-
-2. **Backend** family charts
- * Kilobytes in/s
- * Kilobytes out/s
- * Sessions current
- * Sessions in queue current
-
-3. **Health** chart
- * number of failed servers for every backend (in DOWN state)
-
-
-### configuration
-
-Sample:
-
-```yaml
-via_url:
- user : 'username' # ONLY IF stats auth is used
- pass : 'password' # ONLY IF stats auth is used
- url : 'http://ip.address:port/url;csv;norefresh'
-```
-
-OR
-
-```yaml
-via_socket:
- socket : 'path/to/haproxy/sock'
-```
-
-If no configuration is given, the module will fail to run.
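-
-As background, the `;csv` url above returns HAProxy's CSV stats export; a hedged sketch of reading it (the address is illustrative):
-
-```python
-# Read the HAProxy CSV stats export and print current sessions (scur)
-# for every frontend and backend row.
-import csv
-try:
-    from urllib.request import urlopen
-except ImportError:
-    from urllib2 import urlopen
-
-url = 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'  # illustrative
-text = urlopen(url).read().decode('utf-8')
-lines = text.lstrip('# ').splitlines()  # the header line starts with '# '
-for row in csv.DictReader(lines):
-    if row['svname'] in ('FRONTEND', 'BACKEND'):
-        print(row['pxname'], row['svname'], 'scur=' + row['scur'])
-```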
-
----
-
-# hddtemp
-
-Module monitors disk temperatures from one or more hddtemp daemons.
-
-**Requirement:**
-`hddtemp` must be running in daemonized mode, listening on a TCP port.
-
-It produces one chart, **Temperature**, with a dynamic number of dimensions (one per disk).
-
-### configuration
-
-Sample:
-
-```yaml
-update_every: 3
-host: "127.0.0.1"
-port: 7634
-```
-
-If no configuration is given, the module will attempt to connect to the hddtemp daemon at `127.0.0.1:7634`.
-
----
-
-# httpcheck
-
-Module monitors a remote http server for availability and response time.
-
-Following charts are drawn per job:
-
-1. **Response time** ms
- * Time, in 0.1 ms resolution, the server takes to respond.
- If the connection failed, the value is missing.
-
-2. **Status** boolean
- * Connection successful
- * Unexpected content: No Regex match found in the response
- * Unexpected status code: Do we get 500 errors?
- * Connection failed: port not listening or blocked
- * Connection timed out: host or port unreachable
-
-### configuration
-
-Sample configuration with default values:
-
-```yaml
-server:
- url: 'http://host:port/path' # required
- status_accepted: # optional
- - 200
- timeout: 1 # optional, supports decimals (e.g. 0.2)
- update_every: 3 # optional
- regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html
- redirect: yes # optional
-```
-
-### notes
-
- * The status chart is primarily intended for alarms, badges or for access via API.
- * A system/service/firewall might block netdata's access if a portscan or
- similar is detected.
- * This plugin is meant for simple use cases. Currently, the accuracy of the
- response time is low and should be used as reference only.
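-
-A standalone sketch of the checks described above (the url, accepted status codes and regex are illustrative):
-
-```python
-# Timed GET plus status-code and body-regex checks.
-import re
-import time
-try:
-    from urllib.request import urlopen
-except ImportError:
-    from urllib2 import urlopen
-
-url = 'http://127.0.0.1:8080/'  # illustrative target
-start = time.time()
-try:
-    response = urlopen(url, timeout=1)
-    body = response.read().decode('utf-8', 'ignore')
-    print('time: %.1f ms' % ((time.time() - start) * 1000))
-    print('status accepted:', response.getcode() in (200,))
-    print('content matched:', re.search('REGULAR_EXPRESSION', body) is not None)
-except Exception as error:  # refused, timed out, DNS failure, ...
-    print('connection failed:', error)
-```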
-
----
-
-# icecast
-
-This module will monitor the number of listeners for active sources.
-
-**Requirements:**
- * icecast version >= 2.4.0
-
-It produces the following charts:
-
-1. **Listeners** in listeners
- * source number
-
-### configuration
-
-Needs only the `url` to the server's `/status-json.xsl`.
-
-Here is an example for remote server:
-
-```yaml
-remote:
- url : 'http://1.2.3.4:8443/status-json.xsl'
-```
-
-Without configuration, the module attempts to connect to `http://localhost:8443/status-json.xsl`.
-
----
-
-# IPFS
-
-Module monitors [IPFS](https://ipfs.io) basic information.
-
-1. **Bandwidth** in kbits/s
- * in
- * out
-
-2. **Peers**
- * peers
-
-### configuration
-
-Only url to IPFS server is needed.
-
-Sample:
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://localhost:5001'
-```
-
----
-
-# isc_dhcpd
-
-Module monitors the leases database to show all active leases for the given pools.
-
-**Requirements:**
- * dhcpd leases file MUST BE readable by netdata
- * pools MUST BE in CIDR format
-
-It produces:
-
-1. **Pools utilization** Aggregate chart for all pools.
- * utilization in percent
-
-2. **Total leases**
- * leases (overall number of leases for all pools)
-
-3. **Active leases** for every pool
- * leases (number of active leases in pool)
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- leases_path : '/var/lib/dhcp/dhcpd.leases'
- pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
-```
-
-On python2 you need to install `py2-ipaddress` to make the plugin work.
-The module will not work if no configuration is given.
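-
-A sketch of the pool-utilization idea using the `ipaddress` module mentioned above (the pools and lease addresses are illustrative):
-
-```python
-# Count active lease addresses that fall inside each CIDR pool.
-import ipaddress
-
-pools = [u'192.168.3.0/24', u'192.168.4.0/24']
-active_leases = [u'192.168.3.10', u'192.168.3.11', u'192.168.4.7']  # from dhcpd.leases
-
-for cidr in pools:
-    network = ipaddress.ip_network(cidr)
-    leases = sum(ipaddress.ip_address(ip) in network for ip in active_leases)
-    usable = network.num_addresses - 2  # minus network and broadcast addresses
-    print('%s: %d leases, %.1f%% utilization' % (cidr, leases, 100.0 * leases / usable))
-```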
-
----
-
-
-# mdstat
-
-Module monitors `/proc/mdstat`.
-
-It produces:
-
-1. **Health** Number of failed disks in every array (aggregate chart).
-
-2. **Disks stats**
- * total (number of devices the array ideally would have)
- * inuse (number of devices currently in use)
-
-3. **Current status**
- * resync in percent
- * recovery in percent
- * reshape in percent
- * check in percent
-
-4. **Operation status** (if resync/recovery/reshape/check is active)
- * finish in minutes
- * speed in megabytes/s
-
-### configuration
-No configuration is needed.
-
----
-
-# memcached
-
-Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
-
-1. **Network** in kilobytes/s
- * read
- * written
-
-2. **Connections** per second
- * current
- * rejected
- * total
-
-3. **Items** in cluster
- * current
- * total
-
-4. **Evicted and Reclaimed** items
- * evicted
- * reclaimed
-
-5. **GET** requests/s
- * hits
- * misses
-
-6. **GET rate** in requests/s
- * rate
-
-7. **SET rate** in requests/s
- * rate
-
-8. **DELETE** requests/s
- * hits
- * misses
-
-9. **CAS** requests/s
- * hits
- * misses
- * bad value
-
-10. **Increment** requests/s
- * hits
- * misses
-
-11. **Decrement** requests/s
- * hits
- * misses
-
-12. **Touch** requests/s
- * hits
- * misses
-
-13. **Touch rate** in requests/s
- * rate
-
-### configuration
-
-Sample:
-
-```yaml
-localtcpip:
- name : 'local'
- host : '127.0.0.1'
- port : 24242
-```
-
-If no configuration is given, the module will attempt to connect to a memcached instance at `127.0.0.1:11211`.
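-
-The stats interface linked above is a plain-text protocol over TCP; a hedged sketch of querying it:
-
-```python
-# Send the `stats` command and parse the `STAT <name> <value>` reply.
-import socket
-
-sock = socket.create_connection(('127.0.0.1', 11211), timeout=1)
-sock.sendall(b'stats\r\n')
-raw = b''
-while not raw.endswith(b'END\r\n'):
-    chunk = sock.recv(4096)
-    if not chunk:
-        break
-    raw += chunk
-sock.close()
-
-stats = {}
-for line in raw.decode('ascii').splitlines():
-    if line.startswith('STAT '):
-        _, name, value = line.split(' ', 2)
-        stats[name] = value
-print('get hits/misses:', stats.get('get_hits'), stats.get('get_misses'))
-```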
-
----
-
-# mongodb
-
-Module monitors mongodb performance and health metrics.
-
-**Requirements:**
- * `python-pymongo` package.
-
-You need to install it manually.
-
-
-Number of charts depends on mongodb version, storage engine and other features (replication):
-
-1. **Read requests**:
- * query
- * getmore (operation the cursor executes to get additional data from query)
-
-2. **Write requests**:
- * insert
- * delete
- * update
-
-3. **Active clients**:
- * readers (number of clients with read operations in progress or queued)
- * writers (number of clients with write operations in progress or queued)
-
-4. **Journal transactions**:
- * commits (count of transactions that have been written to the journal)
-
-5. **Data written to the journal**:
- * volume (volume of data)
-
-6. **Background flush** (MMAPv1):
- * average ms (average time taken by flushes to execute)
- * last ms (time taken by the last flush)
-
-7. **Read tickets** (WiredTiger):
- * in use (number of read tickets in use)
- * available (number of available read tickets remaining)
-
-8. **Write tickets** (WiredTiger):
- * in use (number of write tickets in use)
- * available (number of available write tickets remaining)
-
-9. **Cursors**:
- * opened (number of cursors currently opened by MongoDB for clients)
- * timedOut (number of cursors that have timed out)
- * noTimeout (number of open cursors with timeout disabled)
-
-10. **Connections**:
- * connected (number of clients currently connected to the database server)
- * unused (number of unused connections available for new clients)
-
-11. **Memory usage metrics**:
- * virtual
- * resident (amount of memory used by the database process)
- * mapped
- * non mapped
-
-12. **Page faults**:
- * page faults (number of times MongoDB had to fetch data from disk)
-
-13. **Cache metrics** (WiredTiger):
- * percentage of bytes currently in the cache (amount of space taken by cached data)
- * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
-
-14. **Pages evicted from cache** (WiredTiger):
- * modified
- * unmodified
-
-15. **Queued requests**:
- * readers (number of read requests currently queued)
- * writers (number of write requests currently queued)
-
-16. **Errors**:
- * msg (number of message assertions raised)
- * warning (number of warning assertions raised)
- * regular (number of regular assertions raised)
- * user (number of assertions corresponding to errors generated by users)
-
-17. **Storage metrics** (one chart for every database)
- * dataSize (size of all documents + padding in the database)
- * indexSize (size of all indexes in the database)
- * storageSize (size of all extents in the database)
-
-18. **Documents in the database** (one chart for all databases)
- * documents (number of objects in the database among all the collections)
-
-19. **tcmalloc metrics**
- * central cache free
- * current total thread cache
- * pageheap free
- * pageheap unmapped
- * thread cache free
- * transfer cache free
- * heap size
-
-20. **Commands total/failed rate**
- * count
- * createIndex
- * delete
- * eval
- * findAndModify
- * insert
-
-21. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
- * Global lock
- * Database lock
- * Collection lock
- * Metadata lock
- * oplog lock
-
-22. **Replica set members state**
- * state
-
-23. **Oplog window**
- * window (interval of time between the oldest and the latest entries in the oplog)
-
-24. **Replication lag**
- * member (time when the last entry from the oplog was applied for every member)
-
-25. **Replica set member heartbeat latency**
- * member (time when the last heartbeat was received from a replica set member)
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- name : 'local'
- host : '127.0.0.1'
- port : 27017
- user : 'netdata'
- pass : 'netdata'
-
-```
-
-If no configuration is given, the module will attempt to connect to the mongodb daemon at `127.0.0.1:27017`.
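-
-Most of the charts above map to fields of the `serverStatus` admin command; a minimal sketch with pymongo (credentials omitted):
-
-```python
-# Read a few serverStatus fields.
-from pymongo import MongoClient
-
-client = MongoClient('127.0.0.1', 27017)
-status = client.admin.command('serverStatus')
-print('query counter:', status['opcounters']['query'])
-print('insert counter:', status['opcounters']['insert'])
-print('current connections:', status['connections']['current'])
-```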
-
----
-
-
-# mysql
-
-Module monitors one or more mysql servers.
-
-**Requirements:**
- * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
-
-It will produce following charts (if data is available):
-
-1. **Bandwidth** in kbps
- * in
- * out
-
-2. **Queries** in queries/sec
- * queries
- * questions
- * slow queries
-
-3. **Operations** in operations/sec
- * opened tables
- * flush
- * commit
- * delete
- * prepare
- * read first
- * read key
- * read next
- * read prev
- * read random
- * read random next
- * rollback
- * save point
- * update
- * write
-
-4. **Table Locks** in locks/sec
- * immediate
- * waited
-
-5. **Select Issues** in issues/sec
- * full join
- * full range join
- * range
- * range check
- * scan
-
-6. **Sort Issues** in issues/sec
- * merge passes
- * range
- * scan
-
-### configuration
-
-You can provide, per server, the following:
-
-1. username which has access to the database (defaults to 'root')
-2. password (defaults to none)
-3. mysql my.cnf configuration file
-4. mysql socket (optional)
-5. mysql host (ip or hostname)
-6. mysql port (defaults to 3306)
-
-Here is an example for 3 servers:
-
-```yaml
-update_every : 10
-priority : 90100
-retries : 5
-
-local:
- 'my.cnf' : '/etc/mysql/my.cnf'
- priority : 90000
-
-local_2:
- user : 'root'
- pass : 'blablablabla'
- socket : '/var/run/mysqld/mysqld.sock'
- update_every : 1
-
-remote:
- user : 'admin'
- pass : 'bla'
- host : 'example.org'
- port : 9000
- retries : 20
-```
-
-If no configuration is given, the module will attempt to connect to the mysql server via unix socket at `/var/run/mysqld/mysqld.sock` without a password and with username `root`.
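-
-For reference, the counters behind these charts come from `SHOW GLOBAL STATUS`; a hedged sketch with MySQLdb (PyMySQL exposes the same DB-API interface):
-
-```python
-# Fetch the global status variables and pick a few used by the charts.
-import MySQLdb
-
-connection = MySQLdb.connect(read_default_file='/etc/mysql/my.cnf')
-cursor = connection.cursor()
-cursor.execute('SHOW GLOBAL STATUS')
-status = dict(cursor.fetchall())
-print('bytes in/out:', status['Bytes_received'], status['Bytes_sent'])
-print('slow queries:', status['Slow_queries'])
-connection.close()
-```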
-
----
-
-# nginx
-
-This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
-
-**Requirements:**
- * nginx with configured 'ngx_http_stub_status_module'
- * 'location /stub_status'
-
-Example nginx configuration can be found in 'python.d/nginx.conf'
-
-It produces following charts:
-
-1. **Active Connections**
- * active
-
-2. **Requests** in requests/s
- * requests
-
-3. **Active Connections by Status**
- * reading
- * writing
- * waiting
-
-4. **Connections Rate** in connections/s
- * accepts
- * handled
-
-### configuration
-
-Needs only the `url` to the server's `stub_status` page.
-
-Here is an example for local server:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
- url : 'http://localhost/stub_status'
- retries : 10
-```
-
-Without configuration, the module attempts to connect to `http://localhost/stub_status`.
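-
-The `stub_status` page is a small plain-text document; a sketch of parsing it:
-
-```python
-# Parse the text that ngx_http_stub_status_module returns.
-import re
-try:
-    from urllib.request import urlopen
-except ImportError:
-    from urllib2 import urlopen
-
-text = urlopen('http://localhost/stub_status').read().decode('utf-8')
-active = int(re.search(r'Active connections:\s+(\d+)', text).group(1))
-accepts, handled, requests = map(int, re.search(r'(\d+)\s+(\d+)\s+(\d+)', text).groups())
-reading, writing, waiting = map(int, re.search(
-    r'Reading:\s+(\d+)\s+Writing:\s+(\d+)\s+Waiting:\s+(\d+)', text).groups())
-print(active, accepts, handled, requests, reading, writing, waiting)
-```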
-
----
-
-# nginx_plus
-
-This module will monitor one or more nginx_plus servers depending on configuration.
-Servers can be either local or remote.
-
-Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
-
-It produces following charts:
-
-1. **Requests total** in requests/s
- * total
-
-2. **Requests current** in requests
- * current
-
-3. **Connection Statistics** in connections/s
- * accepted
- * dropped
-
-4. **Workers Statistics** in workers
- * idle
- * active
-
-5. **SSL Handshakes** in handshakes/s
- * successful
- * failed
-
-6. **SSL Session Reuses** in sessions/s
- * reused
-
-7. **SSL Memory Usage** in percent
- * usage
-
-8. **Processes** in processes
- * respawned
-
-For every server zone:
-
-1. **Processing** in requests
- * processing
-
-2. **Requests** in requests/s
- * requests
-
-3. **Responses** in requests/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-4. **Traffic** in kilobits/s
- * received
- * sent
-
-For every upstream:
-
-1. **Peers Requests** in requests/s
- * peer name (dimension per peer)
-
-2. **All Peers Responses** in responses/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-3. **Peer Responses** in requests/s (for every peer)
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-4. **Peers Connections** in active
- * peer name (dimension per peer)
-
-5. **Peers Connections Usage** in percent
- * peer name (dimension per peer)
-
-6. **All Peers Traffic** in KB
- * received
- * sent
-
-7. **Peer Traffic** in KB/s (for every peer)
- * received
- * sent
-
-8. **Peer Timings** in ms (for every peer)
- * header
- * response
-
-9. **Memory Usage** in percent
- * usage
-
-10. **Peers Status** in state
- * peer name (dimension per peer)
-
-11. **Peers Total Downtime** in seconds
- * peer name (dimension per peer)
-
-For every cache:
-
-1. **Traffic** in KB
- * served
- * written
- * bypass
-
-2. **Memory Usage** in percent
- * usage
-
-### configuration
-
-Needs only the `url` to the server's `status` page.
-
-Here is an example for local server:
-
-```yaml
-local:
- url : 'http://localhost/status'
-```
-
-Without configuration, the module will fail to start.
-
----
-
-# nsd
-
-Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
-
-**Requirements:**
- * Version of `nsd` must be 4.0+
- * Netdata must have permissions to run `nsd-control stats_noreset`
-
-It produces:
-
-1. **Queries**
- * queries
-
-2. **Zones**
- * master
- * slave
-
-3. **Protocol**
- * udp
- * udp6
- * tcp
- * tcp6
-
-4. **Query Type**
- * A
- * NS
- * CNAME
- * SOA
- * PTR
- * HINFO
- * MX
- * NAPTR
- * TXT
- * AAAA
- * SRV
- * ANY
-
-5. **Transfer**
- * NOTIFY
- * AXFR
-
-6. **Return Code**
- * NOERROR
- * FORMERR
- * SERVFAIL
- * NXDOMAIN
- * NOTIMP
- * REFUSED
- * YXDOMAIN
-
-
-Configuration is not needed.
-
----
-
-# ntpd
-
-Module monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
-
-**Requirements:**
- * Version: `NTPv4`
- * Local interrogation allowed in `/etc/ntp.conf` (default):
-
-```
-# Local users may interrogate the ntp server more closely.
-restrict 127.0.0.1
-restrict ::1
-```
-
-It produces:
-
-1. system
- * offset
- * jitter
- * frequency
- * delay
- * dispersion
- * stratum
- * tc
- * precision
-
-2. peers
- * offset
- * delay
- * dispersion
- * jitter
- * rootdelay
- * rootdispersion
- * stratum
- * hmode
- * pmode
- * hpoll
- * ppoll
- * precision
-
-**configuration**
-
-Sample:
-
-```yaml
-update_every: 10
-
-host: 'localhost'
-port: '123'
-show_peers: yes
-# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
-peer_filter: '(127\..*)|(192\.168\..*)'
-# check for new/changed peers every 60 updates
-peer_rescan: 60
-```
-
-Sample (multiple jobs):
-
-Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
-
-```yaml
-local:
- host: 'localhost'
-
-otherhost:
- host: 'otherhost'
-```
-
-If no configuration is given, the module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
-
----
-
-# ovpn_status_log
-
-Module monitors the openvpn-status log file.
-
-**Requirements:**
-
- * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
- so that multiple instances do not overwrite each other's output files.
-
- * Make sure NETDATA USER CAN READ openvpn-status.log
-
- * The update_every interval MUST MATCH the interval at which OpenVPN writes its operational status to the log file.
-
-It produces:
-
-1. **Users** OpenVPN active users
- * users
-
-2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s
- * in
- * out
-
-### configuration
-
-Sample:
-
-```yaml
-default:
- log_path : '/var/log/openvpn-status.log'
-```
-
----
-
-# phpfpm
-
-This module will monitor one or more php-fpm instances depending on configuration.
-
-**Requirements:**
- * php-fpm with enabled `status` page
- * access to `status` page via web server
-
-It produces following charts:
-
-1. **Active Connections**
- * active
- * maxActive
- * idle
-
-2. **Requests** in requests/s
- * requests
-
-3. **Performance**
- * reached
- * slow
-
-### configuration
-
-Needs only the `url` to the server's `status` page.
-
-Here is an example for local instance:
-
-```yaml
-update_every : 3
-priority : 90100
-
-local:
- url : 'http://localhost/status'
- retries : 10
-```
-
-Without configuration, the module attempts to connect to `http://localhost/status`.
-
----
-
-# portcheck
-
-Module monitors a remote TCP service.
-
-Following charts are drawn per host:
-
-1. **Latency** ms
- * Time required to connect to a TCP port.
- Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
-
-2. **Status** boolean
- * Connection successful
- * Could not create socket: possible DNS problems
- * Connection refused: port not listening or blocked
- * Connection timed out: host or port unreachable
-
-
-### configuration
-
-```yaml
-server:
- host: 'dns or ip' # required
- port: 22 # required
- timeout: 1 # optional
- update_every: 1 # optional
-```
-
-### notes
-
- * The error chart is intended for alarms, badges or for access via API.
- * A system/service/firewall might block netdata's access if a portscan or
- similar is detected.
- * Currently, the accuracy of the latency is low and should be used as reference only.
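-
-A standalone sketch of the measurement (host and port are illustrative):
-
-```python
-# Time a TCP connect and classify the outcome like the status chart.
-import socket
-import time
-
-host, port, timeout = '127.0.0.1', 22, 1.0  # illustrative
-start = time.time()
-try:
-    connection = socket.create_connection((host, port), timeout=timeout)
-    connection.close()
-    print('success, latency: %.1f ms' % ((time.time() - start) * 1000))
-except socket.timeout:
-    print('connection timed out')
-except socket.error as error:
-    print('connection failed or refused:', error)
-```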
-
----
-
-# postfix
-
-Simple module executing `postqueue -p` to grab the postfix queue.
-
-It produces only two charts:
-
-1. **Postfix Queue Emails**
- * emails
-
-2. **Postfix Queue Emails Size** in KB
- * size
-
-Configuration is not needed.
-
----
-
-# postgres
-
-Module monitors one or more postgres servers.
-
-**Requirements:**
-
- * `python-psycopg2` package. You have to install it manually.
-
-Following charts are drawn:
-
-1. **Database size** MB
- * size
-
-2. **Current Backend Processes** processes
- * active
-
-3. **Write-Ahead Logging Statistics** files/s
- * total
- * ready
- * done
-
-4. **Checkpoints** writes/s
- * scheduled
- * requested
-
-5. **Current connections to db** count
- * connections
-
-6. **Tuples returned from db** tuples/s
- * sequential
- * bitmap
-
-7. **Tuple reads from db** reads/s
- * disk
- * cache
-
-8. **Transactions on db** transactions/s
- * committed
- * rolled back
-
-9. **Tuples written to db** writes/s
- * inserted
- * updated
- * deleted
- * conflicts
-
-10. **Locks on db** count per type
- * locks
-
-### configuration
-
-```yaml
-socket:
- name : 'socket'
- user : 'postgres'
- database : 'postgres'
-
-tcp:
- name : 'tcp'
- user : 'postgres'
- database : 'postgres'
- host : 'localhost'
- port : 5432
-```
-
-When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:5432`.
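-
-As an illustration with `psycopg2` (the package required above), the transactions chart corresponds to `xact_commit`/`xact_rollback` in `pg_stat_database`:
-
-```python
-# Read the per-database commit/rollback counters.
-import psycopg2
-
-connection = psycopg2.connect(user='postgres', database='postgres',
-                              host='localhost', port=5432)
-cursor = connection.cursor()
-cursor.execute('SELECT datname, xact_commit, xact_rollback FROM pg_stat_database')
-for datname, commits, rollbacks in cursor.fetchall():
-    print(datname, commits, rollbacks)
-connection.close()
-```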
-
----
-
-# powerdns
-
-Module monitors powerdns performance and health metrics.
-
-Following charts are drawn:
-
-1. **Queries and Answers**
- * udp-queries
- * udp-answers
- * tcp-queries
- * tcp-answers
-
-2. **Cache Usage**
- * query-cache-hit
- * query-cache-miss
- * packetcache-hit
- * packetcache-miss
-
-3. **Cache Size**
- * query-cache-size
- * packetcache-size
- * key-cache-size
- * meta-cache-size
-
-4. **Latency**
- * latency
-
-### configuration
-
-```yaml
-local:
- name : 'local'
- url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
- header :
- X-API-Key: 'change_me'
-```
-
----
-
-# rabbitmq
-
-Module monitors rabbitmq performance and health metrics.
-
-Following charts are drawn:
-
-1. **Queued Messages**
- * ready
- * unacknowledged
-
-2. **Message Rates**
- * ack
- * redelivered
- * deliver
- * publish
-
-3. **Global Counts**
- * channels
- * consumers
- * connections
- * queues
- * exchanges
-
-4. **File Descriptors**
- * used descriptors
-
-5. **Socket Descriptors**
- * used descriptors
-
-6. **Erlang processes**
- * used processes
-
-7. **Erlang run queue**
- * Erlang run queue
-
-8. **Memory**
- * free memory in megabytes
-
-9. **Disk Space**
- * free disk space in gigabytes
-
-### configuration
-
-```yaml
-socket:
- name : 'local'
- host : '127.0.0.1'
- port : 15672
- user : 'guest'
- pass : 'guest'
-
-```
-
-When no configuration file is found, module tries to connect to: `localhost:15672`.
-
----
-
-# redis
-
-Gets INFO data from a redis instance.
-
-Following charts are drawn:
-
-1. **Operations** per second
- * operations
-
-2. **Hit rate** in percent
- * rate
-
-3. **Memory utilization** in kilobytes
- * total
- * lua
-
-4. **Database keys**
- * lines are created dynamically based on how many databases there are
-
-5. **Clients**
- * connected
- * blocked
-
-6. **Slaves**
- * connected
-
-### configuration
-
-```yaml
-socket:
- name : 'local'
- socket : '/var/lib/redis/redis.sock'
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 6379
-```
-
-When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:6379`.
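-
-The hit-rate chart is derived from the `keyspace_hits`/`keyspace_misses` fields of `INFO`; a hedged raw-socket sketch:
-
-```python
-# Send INFO, collect the reply and compute the hit rate.
-import socket
-
-sock = socket.create_connection(('localhost', 6379), timeout=1)
-sock.sendall(b'INFO\r\n')
-sock.settimeout(0.5)
-chunks = []
-try:
-    while True:
-        chunk = sock.recv(4096)
-        if not chunk:
-            break
-        chunks.append(chunk)
-except socket.timeout:
-    pass  # crude end-of-reply detection, good enough for a sketch
-sock.close()
-
-info = dict(line.split(':', 1)
-            for line in b''.join(chunks).decode('utf-8').splitlines()
-            if ':' in line and not line.startswith('#'))
-hits, misses = int(info['keyspace_hits']), int(info['keyspace_misses'])
-print('hit rate: %.1f%%' % (100.0 * hits / ((hits + misses) or 1)))
-```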
-
----
-
-# samba
-
-Performance metrics of Samba file sharing.
-
-It produces the following charts:
-
-1. **Syscall R/Ws** in kilobytes/s
- * sendfile
- * recvfile
-
-2. **Smb2 R/Ws** in kilobytes/s
- * readout
- * writein
- * readin
- * writeout
-
-3. **Smb2 Create/Close** in operations/s
- * create
- * close
-
-4. **Smb2 Info** in operations/s
- * getinfo
- * setinfo
-
-5. **Smb2 Find** in operations/s
- * find
-
-6. **Smb2 Notify** in operations/s
- * notify
-
-7. **Smb2 Lesser Ops** as counters
- * tcon
- * negprot
- * tdis
- * cancel
- * logoff
- * flush
- * lock
- * keepalive
- * break
- * sessetup
-
-### configuration
-
-Requires that smbd has been compiled with profiling enabled. It is also
-required that `smbd` was started either with the `-P 1` option or with
-`smbd profiling level` set inside `smb.conf`.
-
-This plugin uses `smbstatus -P`, which can only be executed by root. It uses
-sudo and assumes that sudo is configured such that the `netdata` user can
-execute smbstatus as root without a password.
-
-For example:
-
- netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
-
-```yaml
-update_every : 5 # update frequency
-```
-
----
-
-# sensors
-
-System sensors information.
-
-Charts are created dynamically.
-
-### configuration
-
-For detailed configuration information please read [`sensors.conf`](https://github.com/firehol/netdata/blob/master/conf.d/python.d/sensors.conf) file.
-
-### possible issues
-
-There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
-We are tracking such cases in issue [#827](https://github.com/firehol/netdata/issues/827).
-Please join this discussion for help.
-
----
-
-# springboot
-
-This module will monitor one or more Java Spring-boot applications depending on configuration.
-
-It produces following charts:
-
-1. **Response Codes** in requests/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
- * others
-
-2. **Threads**
- * daemon
- * total
-
-3. **GC Time** in milliseconds and **GC Operations** in operations/s
- * Copy
- * MarkSweep
- * ...
-
-4. **Heap Memory Usage** in KB
- * used
- * committed
-
-### configuration
-
-Please see the [Monitoring Java Spring Boot Applications](https://github.com/firehol/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration.
-
----
-
-# squid
-
-This module will monitor one or more squid instances depending on configuration.
-
-It produces following charts:
-
-1. **Client Bandwidth** in kilobits/s
- * in
- * out
- * hits
-
-2. **Client Requests** in requests/s
- * requests
- * hits
- * errors
-
-3. **Server Bandwidth** in kilobits/s
- * in
- * out
-
-4. **Server Requests** in requests/s
- * requests
- * errors
-
-### configuration
-
-```yaml
-priority : 50000
-
-local:
- request : 'cache_object://localhost:3128/counters'
- host : 'localhost'
- port : 3128
-```
-
-Without any configuration, the module will try to autodetect where squid presents its `counters` data.
-
----
-
-# smartd_log
-
-Module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
-
-It produces following charts (you can add additional attributes in the module configuration file):
-
-1. **Read Error Rate** attribute 1
-
-2. **Start/Stop Count** attribute 4
-
-3. **Reallocated Sectors Count** attribute 5
-
-4. **Seek Error Rate** attribute 7
-
-5. **Power-On Hours Count** attribute 9
-
-6. **Power Cycle Count** attribute 12
-
-7. **Load/Unload Cycles** attribute 193
-
-8. **Temperature** attribute 194
-
-9. **Current Pending Sectors** attribute 197
-
-10. **Off-Line Uncorrectable** attribute 198
-
-11. **Write Error Rate** attribute 200
-
-### configuration
-
-```yaml
-local:
- log_path : '/var/log/smartd/'
-```
-
-If no configuration is given, the module will attempt to read log files in the `/var/log/smartd/` directory.
-
----
-
-# tomcat
-
-Presents memory utilization of tomcat containers.
-
-Charts:
-
-1. **Requests** per second
- * accesses
-
-2. **Volume** in KB/s
- * volume
-
-3. **Threads**
- * current
- * busy
-
-4. **JVM Free Memory** in MB
- * jvm
-
-### configuration
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://127.0.0.1:8080/manager/status?XML=true'
- user : 'tomcat_username'
- pass : 'secret_tomcat_password'
-```
-
-Without configuration, the module attempts to connect to `http://localhost:8080/manager/status?XML=true` without any credentials,
-so it will probably fail.
-
----
-
-# Traefik
-
-Module uses the `health` API to provide statistics.
-
-It produces:
-
-1. **Responses** by statuses
- * success (1xx, 2xx, 304)
- * error (5xx)
- * redirect (3xx except 304)
- * bad (4xx)
- * other (all other responses)
-
-2. **Responses** by codes
- * 2xx (successful)
- * 5xx (internal server errors)
- * 3xx (redirect)
- * 4xx (bad)
- * 1xx (informational)
- * other (non-standard responses)
-
-3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
-
-4. **Requests**/s
- * request statistics
-
-5. **Total response time**
- * sum of all response times
-
-6. **Average response time**
-
-7. **Average response time per iteration**
-
-8. **Uptime**
- * Traefik server uptime
-
-### configuration
-
-Needs only the `url` to the server's `health` endpoint.
-
-Here is an example for local server:
-
-```yaml
-update_every : 1
-priority : 60000
-
-local:
- url : 'http://localhost:8080/health'
- retries : 10
-```
-
-Without configuration, the module attempts to connect to `http://localhost:8080/health`.
-
----
-
-# varnish cache
-
-Module uses the `varnishstat` command to provide varnish cache statistics.
-
-It produces:
-
-1. **Connections Statistics** in connections/s
- * accepted
- * dropped
-
-2. **Client Requests** in requests/s
- * received
-
-3. **All History Hit Rate Ratio** in percent
- * hit
- * miss
- * hitpass
-
-4. **Current Poll Hit Rate Ratio** in percent
- * hit
- * miss
- * hitpass
-
-5. **Expired Objects** in expired/s
- * objects
-
-6. **Least Recently Used Nuked Objects** in nuked/s
- * objects
-
-
-7. **Number Of Threads In All Pools** in threads
- * threads
-
-8. **Threads Statistics** in threads/s
- * created
- * failed
- * limited
-
-9. **Current Queue Length** in requests
- * in queue
-
-10. **Backend Connections Statistics** in connections/s
- * successful
- * unhealthy
- * reused
- * closed
- * recycled
- * failed
-
-11. **Requests To The Backend** in requests/s
- * received
-
-12. **ESI Statistics** in problems/s
- * errors
- * warnings
-
-13. **Memory Usage** in MB
- * free
- * allocated
-
-14. **Uptime** in seconds
- * uptime
-
-
-### configuration
-
-No configuration is needed.
-
----
-
-# web_log
-
-Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
-
-It produces following charts:
-
-1. **Response by type** requests/s
- * success (1xx, 2xx, 304)
- * error (5xx)
- * redirect (3xx except 304)
- * bad (4xx)
- * other (all other responses)
-
-2. **Response by code family** requests/s
- * 1xx (informational)
- * 2xx (successful)
- * 3xx (redirect)
- * 4xx (bad)
- * 5xx (internal server errors)
- * other (non-standard responses)
- * unmatched (the lines in the log file that are not matched)
-
-3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
-
-4. **Bandwidth** KB/s
- * received (bandwidth of requests)
- * sent (bandwidth of responses)
-
-5. **Timings** ms (request processing time)
- * min (minimum request processing time)
- * max (maximum request processing time)
- * average (average request processing time)
-
-6. **Request per url** requests/s (configured by user)
-
-7. **Http Methods** requests/s (requests per http method)
-
-8. **Http Versions** requests/s (requests per http version)
-
-9. **IP protocols** requests/s (requests per ip protocol version)
-
-10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration)
-
-11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata)
-
-
-### configuration
-
-```yaml
-nginx_log:
- name : 'nginx_log'
- path : '/var/log/nginx/access.log'
-
-apache_log:
- name : 'apache_log'
- path : '/var/log/apache/other_vhosts_access.log'
- categories:
- cacti : 'cacti.*'
- observium : 'observium'
-```
-
-Module has preconfigured jobs for nginx, apache and gunicorn on various distros.
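-
-The "response by type" bucketing in chart 1 can be written down directly; a small sketch:
-
-```python
-# Map an HTTP status code to the chart-1 families described above.
-def response_family(code):
-    if 100 <= code < 300 or code == 304:
-        return 'success'   # 1xx, 2xx and 304
-    if 300 <= code < 400:
-        return 'redirect'  # 3xx except 304
-    if 400 <= code < 500:
-        return 'bad'
-    if 500 <= code < 600:
-        return 'error'
-    return 'other'
-
-
-for code in (200, 304, 301, 404, 502, 999):
-    print(code, response_family(code))
-```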
-
----
diff --git a/python.d/dnsdist.chart.py b/python.d/dnsdist.chart.py
deleted file mode 100644
index b40112cbc..000000000
--- a/python.d/dnsdist.chart.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# -*- coding: utf-8 -*-
-from json import loads
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = ['queries', 'queries_dropped', 'packets_dropped', 'answers', 'backend_responses', 'backend_commerrors', 'backend_errors', 'cache', 'servercpu', 'servermem', 'query_latency', 'query_latency_avg']
-CHARTS = {
- 'queries': {
- 'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
- 'lines': [
- ['queries', 'all', 'incremental'],
- ['rdqueries', 'recursive', 'incremental'],
- ['empty-queries', 'empty', 'incremental']
- ]},
- 'queries_dropped': {
- 'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'],
- 'lines': [
- ['rule-drop', 'rule drop', 'incremental'],
- ['dyn-blocked', 'dynamic block', 'incremental'],
- ['no-policy', 'no policy', 'incremental'],
- ['noncompliant-queries', 'non compliant', 'incremental']
- ]},
- 'packets_dropped': {
- 'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'],
- 'lines': [
- ['acl-drops', 'acl', 'incremental']
- ]},
- 'answers': {
- 'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'],
- 'lines': [
- ['self-answered', 'self answered', 'incremental'],
- ['rule-nxdomain', 'nxdomain', 'incremental', -1],
- ['rule-refused', 'refused', 'incremental', -1],
- ['trunc-failures', 'trunc failures', 'incremental', -1]
- ]},
- 'backend_responses': {
- 'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'],
- 'lines': [
- ['responses', 'responses', 'incremental']
- ]},
- 'backend_commerrors': {
- 'options': [None, 'Backend Communication Errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'],
- 'lines': [
- ['downstream-send-errors', 'send errors', 'incremental']
- ]},
- 'backend_errors': {
- 'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'],
- 'lines': [
- ['downstream-timeouts', 'timeout', 'incremental'],
- ['servfail-responses', 'servfail', 'incremental'],
- ['noncompliant-responses', 'non compliant', 'incremental']
- ]},
- 'cache': {
- 'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'],
- 'lines': [
- ['cache-hits', 'hits', 'incremental'],
- ['cache-misses', 'misses', 'incremental', -1]
- ]},
- 'servercpu': {
- 'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'],
- 'lines': [
- ['cpu-sys-msec', 'system state', 'incremental'],
- ['cpu-user-msec', 'user state', 'incremental']
- ]},
- 'servermem': {
- 'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'],
- 'lines': [
- ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576]
- ]},
- 'query_latency': {
- 'options': [None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'],
- 'lines': [
- ['latency0-1', '1ms', 'incremental'],
- ['latency1-10', '10ms', 'incremental'],
- ['latency10-50', '50ms', 'incremental'],
- ['latency50-100', '100ms', 'incremental'],
- ['latency100-1000', '1sec', 'incremental'],
- ['latency-slow', 'slow', 'incremental']
- ]},
- 'query_latency_avg': {
- 'options': [None, 'Average latency for the last N queries', 'ms/query', 'latency', 'dnsdist.query_latency_avg', 'line'],
- 'lines': [
- ['latency-avg100', '100', 'absolute'],
- ['latency-avg1000', '1k', 'absolute'],
- ['latency-avg10000', '10k', 'absolute'],
- ['latency-avg1000000', '1000k', 'absolute']
- ]}
-}
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- data = self._get_raw_data()
- if not data:
- return None
-
- return loads(data)
-
diff --git a/python.d/fail2ban.chart.py b/python.d/fail2ban.chart.py
deleted file mode 100644
index 895833f87..000000000
--- a/python.d/fail2ban.chart.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: fail2ban log netdata python.d module
-# Author: l2isbad
-
-import bisect
-
-from glob import glob
-from re import compile as r_compile
-from os import access as is_accessible, R_OK
-from os.path import isdir, getsize
-
-
-from bases.FrameworkServices.LogService import LogService
-
-priority = 60000
-retries = 60
-REGEX_JAILS = r_compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
-REGEX_DATA = r_compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>U|B)[a-z]+ (?P<ipaddr>\d{1,3}(?:\.\d{1,3}){3})')
-ORDER = ['jails_bans', 'jails_in_jail']
-
-
-class Service(LogService):
- """
- fail2ban log class
- Reads logs line by line
- Jail auto detection included
- It produces following charts:
- * Bans per second for every jail
- * Banned IPs for every jail (since the last restart of netdata)
- """
- def __init__(self, configuration=None, name=None):
- LogService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = dict()
- self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
- self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
- self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
- self.exclude = self.configuration.get('exclude')
-
- def _get_data(self):
- """
- Parse new log lines
- :return: dict
- """
- raw = self._get_raw_data()
- if raw is None:
- return None
- elif not raw:
- return self.to_netdata
-
- # Fail2ban logs looks like
- # 2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231
- for row in raw:
- match = REGEX_DATA.search(row)
- if match:
- match_dict = match.groupdict()
- jail, action, ipaddr = match_dict['jail'], match_dict['action'], match_dict['ipaddr']
- if jail in self.jails_list:
- if action == 'B':
- self.to_netdata[jail] += 1
- if address_not_in_jail(self.banned_ips[jail], ipaddr, self.to_netdata[jail + '_in_jail']):
- self.to_netdata[jail + '_in_jail'] += 1
- else:
- if ipaddr in self.banned_ips[jail]:
- self.banned_ips[jail].remove(ipaddr)
- self.to_netdata[jail + '_in_jail'] -= 1
-
- return self.to_netdata
-
- def check(self):
- """
- :return: bool
-
- Check if the "log_path" is not empty and readable
- """
-
- if not (is_accessible(self.log_path, R_OK) and getsize(self.log_path) != 0):
- self.error('%s is not readable or empty' % self.log_path)
- return False
- self.jails_list, self.to_netdata, self.banned_ips = self.jails_auto_detection_()
- self.definitions = create_definitions_(self.jails_list)
- self.info('Jails: %s' % self.jails_list)
- return True
-
- def jails_auto_detection_(self):
- """
- return: <tuple>
-
- * jails_list - list of enabled jails (['ssh', 'apache', ...])
- * to_netdata - dict ({'ssh': 0, 'ssh_in_jail': 0, ...})
- * banned_ips - here will be stored all the banned ips ({'ssh': ['1.2.3.4', '5.6.7.8', ...], ...})
- """
- raw_jails_list = list()
- jails_list = list()
-
- for raw_jail in parse_configuration_files_(self.conf_path, self.conf_dir, self.error):
- raw_jails_list.extend(raw_jail)
-
- for jail, status in raw_jails_list:
- if status == 'true' and jail not in jails_list:
- jails_list.append(jail)
- elif status == 'false' and jail in jails_list:
- jails_list.remove(jail)
- # If for some reason parse failed we still can START with default jails_list.
- jails_list = list(set(jails_list) - set(self.exclude.split()
- if isinstance(self.exclude, str) else list())) or ['ssh']
-
- to_netdata = dict([(jail, 0) for jail in jails_list])
- to_netdata.update(dict([(jail + '_in_jail', 0) for jail in jails_list]))
- banned_ips = dict([(jail, list()) for jail in jails_list])
-
- return jails_list, to_netdata, banned_ips
-
-
-def create_definitions_(jails_list):
- """
- Chart definitions creating
- """
-
- definitions = {
- 'jails_bans': {'options': [None, 'Jails Ban Statistics', 'bans/s', 'bans', 'jail.bans', 'line'],
- 'lines': []},
- 'jails_in_jail': {'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs',
- 'in jail', 'jail.in_jail', 'line'],
- 'lines': []}}
- for jail in jails_list:
- definitions['jails_bans']['lines'].append([jail, jail, 'incremental'])
- definitions['jails_in_jail']['lines'].append([jail + '_in_jail', jail, 'absolute'])
-
- return definitions
-
-
-def parse_configuration_files_(jails_conf_path, jails_conf_dir, print_error):
- """
- :param jails_conf_path: <str>
- :param jails_conf_dir: <str>
- :param print_error: <function>
- :return: <tuple>
-
- Uses "find_jails_in_files" function to find all jails in the "jails_conf_dir" directory
- and in the "jails_conf_path"
-
- All files must endswith ".local" or ".conf"
- Return order is important.
- According man jail.conf it should be
- * jail.conf
- * jail.d/*.conf (in alphabetical order)
- * jail.local
- * jail.d/*.local (in alphabetical order)
- """
- path_conf, path_local, dir_conf, dir_local = list(), list(), list(), list()
-
- # Parse files in the directory
- if not (isinstance(jails_conf_dir, str) and isdir(jails_conf_dir)):
- print_error('%s is not a directory' % jails_conf_dir)
- else:
- dir_conf = list(filter(lambda conf: is_accessible(conf, R_OK), glob(jails_conf_dir + '/*.conf')))
- dir_local = list(filter(lambda local: is_accessible(local, R_OK), glob(jails_conf_dir + '/*.local')))
- if not (dir_conf or dir_local):
- print_error('%s is empty or not readable' % jails_conf_dir)
- else:
- dir_conf, dir_local = (find_jails_in_files(dir_conf, print_error),
- find_jails_in_files(dir_local, print_error))
-
- # Parse .conf and .local files
- if isinstance(jails_conf_path, str) and jails_conf_path.endswith(('.local', '.conf')):
- path_conf, path_local = (find_jails_in_files([jails_conf_path.split('.')[0] + '.conf'], print_error),
- find_jails_in_files([jails_conf_path.split('.')[0] + '.local'], print_error))
-
- return path_conf, dir_conf, path_local, dir_local
-
-
-def find_jails_in_files(list_of_files, print_error):
- """
- :param list_of_files: <list>
- :param print_error: <function>
- :return: <list>
-
- Open a file and parse it to find all (enabled and disabled) jails
- The output is a list of tuples:
- [('ssh', 'true'), ('apache', 'false'), ...]
- """
- jails_list = list()
- for conf in list_of_files:
- if is_accessible(conf, R_OK):
- with open(conf, 'rt') as f:
- raw_data = f.readlines()
- data = ' '.join(line for line in raw_data if line.startswith(('[', 'enabled')))
- jails_list.extend(REGEX_JAILS.findall(data))
- else:
- print_error('%s is not readable or not exist' % conf)
- return jails_list
-
-
-def address_not_in_jail(pool, address, pool_size):
- """
- :param pool: <list>
- :param address: <str>
- :param pool_size: <int>
- :return: bool
-
- Checks if the address is in the pool.
- If not address will be added
- """
- index = bisect.bisect_left(pool, address)
- if index < pool_size:
- if pool[index] == address:
- return False
- bisect.insort_left(pool, address)
- return True
- else:
- bisect.insort_left(pool, address)
- return True
diff --git a/python.d/hddtemp.chart.py b/python.d/hddtemp.chart.py
deleted file mode 100644
index 577cab09f..000000000
--- a/python.d/hddtemp.chart.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: hddtemp netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-
-import os
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-
-# default module values (can be overridden per job in `config`)
-#update_every = 2
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'host': 'localhost',
-# 'port': 7634
-# }}
-
-ORDER = ['temperatures']
-
-CHARTS = {
- 'temperatures': {
- 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'],
- 'lines': [
- # lines are created dynamically in `check()` method
- ]}}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self._keep_alive = False
- self.request = ""
- self.host = "127.0.0.1"
- self.port = 7634
- self.disks = list()
-
- def get_disks(self):
- try:
- disks = self.configuration['devices']
- self.info("Using configured disks {0}".format(disks))
- except (KeyError, TypeError):
- self.info("Autodetecting disks")
- return ["/dev/" + f for f in os.listdir("/dev") if len(f) == 3 and f.startswith("sd")]
-
- ret = list()
- for disk in disks:
- if not disk.startswith('/dev/'):
- disk = "/dev/" + disk
- ret.append(disk)
- if not ret:
- self.error("Provided disks cannot be found in /dev directory.")
- return ret
-
- def _check_raw_data(self, data):
- if not data.endswith('|'):
- return False
-
- if all(disk in data for disk in self.disks):
- return True
- return False
-
- def get_data(self):
- """
- Get data from TCP/IP socket
- :return: dict
- """
- try:
- raw = self._get_raw_data().split("|")[:-1]
- except AttributeError:
- self.error("no data received")
- return None
- data = dict()
- for i in range(len(raw) // 5):
- if not raw[i*5+1] in self.disks:
- continue
- try:
- val = int(raw[i*5+3])
- except ValueError:
- val = 0
- data[raw[i*5+1].replace("/dev/", "")] = val
-
- if not data:
- self.error("received data doesn't have needed records")
- return None
- return data
-
- def check(self):
- """
- Parse configuration, check if hddtemp is available, and dynamically create chart lines data
- :return: boolean
- """
- self._parse_config()
- self.disks = self.get_disks()
-
- data = self.get_data()
- if data is None:
- return False
-
- for name in data:
- self.definitions['temperatures']['lines'].append([name])
- return True
diff --git a/python.d/mdstat.chart.py b/python.d/mdstat.chart.py
deleted file mode 100644
index 35ba9058f..000000000
--- a/python.d/mdstat.chart.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: mdstat netdata python.d module
-# Author: l2isbad
-
-import re
-
-from collections import defaultdict
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-priority = 60000
-retries = 60
-update_every = 1
-
-ORDER = ['mdstat_health']
-CHARTS = {
- 'mdstat_health': {
- 'options': [None, 'Faulty Devices In MD', 'failed disks', 'health', 'md.health', 'line'],
- 'lines': list()
- }
-}
-
-OPERATIONS = ('check', 'resync', 'reshape', 'recovery', 'finish', 'speed')
-
-RE_DISKS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
- r'(?P<total_disks>[0-9]+)/'
- r'(?P<inuse_disks>[0-9]+)\]')
-
-RE_STATUS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
- r'(?P<operation>[a-z]+) =[ ]{1,2}'
- r'(?P<operation_status>[0-9.]+).+finish='
- r'(?P<finish>([0-9.]+))min speed='
- r'(?P<speed>[0-9]+)')
-
-
-def md_charts(md):
- order = ['{0}_disks'.format(md.name),
- '{0}_operation'.format(md.name),
- '{0}_finish'.format(md.name),
- '{0}_speed'.format(md.name)
- ]
-
- charts = dict()
- charts[order[0]] = {
- 'options': [None, 'Disks Stats', 'disks', md.name, 'md.disks', 'stacked'],
- 'lines': [
- ['{0}_total_disks'.format(md.name), 'total', 'absolute'],
- ['{0}_inuse_disks'.format(md.name), 'inuse', 'absolute']
- ]
- }
-
- charts['_'.join([md.name, 'operation'])] = {
- 'options': [None, 'Current Status', 'percent', md.name, 'md.status', 'line'],
- 'lines': [
- ['{0}_resync'.format(md.name), 'resync', 'absolute', 1, 100],
- ['{0}_recovery'.format(md.name), 'recovery', 'absolute', 1, 100],
- ['{0}_reshape'.format(md.name), 'reshape', 'absolute', 1, 100],
- ['{0}_check'.format(md.name), 'check', 'absolute', 1, 100]
- ]
- }
-
- charts['_'.join([md.name, 'finish'])] = {
- 'options': [None, 'Approximate Time Until Finish', 'seconds', md.name, 'md.rate', 'line'],
- 'lines': [
- ['{0}_finish'.format(md.name), 'finish in', 'absolute', 1, 1000]
- ]
- }
-
- charts['_'.join([md.name, 'speed'])] = {
- 'options': [None, 'Operation Speed', 'KB/s', md.name, 'md.rate', 'line'],
- 'lines': [
- ['{0}_speed'.format(md.name), 'speed', 'absolute', 1, 1000]
- ]
- }
-
- return order, charts
-
-
-class MD:
- def __init__(self, name, stats):
- self.name = name
- self.stats = stats
-
- def update_stats(self, stats):
- self.stats = stats
-
- def data(self):
- stats = dict(('_'.join([self.name, k]), v) for k, v in self.stats.items())
- stats['{0}_health'.format(self.name)] = int(self.stats['total_disks']) - int(self.stats['inuse_disks'])
- return stats
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.mds = dict()
-
- def check(self):
- arrays = find_arrays(self._get_raw_data())
- if not arrays:
- self.error('Failed to read data from /proc/mdstat or there is no active arrays')
- return None
- return True
-
- @staticmethod
- def _get_raw_data():
- """
- Read data from /proc/mdstat
- :return: str
- """
- try:
- with open('/proc/mdstat', 'rt') as proc_mdstat:
- return proc_mdstat.readlines() or None
- except (OSError, IOError):
- return None
-
- def get_data(self):
- """
- Parse data from _get_raw_data()
- :return: dict
- """
- arrays = find_arrays(self._get_raw_data())
- if not arrays:
- return None
-
- data = dict()
- for array, values in arrays.items():
-
- if array not in self.mds:
- md = MD(array, values)
- self.mds[md.name] = md
- self.create_new_array_charts(md)
- else:
- md = self.mds[array]
- md.update_stats(values)
-
- data.update(md.data())
-
- return data
-
- def create_new_array_charts(self, md):
- order, charts = md_charts(md)
-
- self.charts['mdstat_health'].add_dimension(['{0}_health'.format(md.name), md.name])
- for chart_name in order:
- params = [chart_name] + charts[chart_name]['options']
- dimensions = charts[chart_name]['lines']
-
- new_chart = self.charts.add_chart(params)
- for dimension in dimensions:
- new_chart.add_dimension(dimension)
-
-
-def find_arrays(raw_data):
- if raw_data is None:
- return None
- data = defaultdict(str)
- counter = 1
-
- for row in (elem.strip() for elem in raw_data):
- if not row:
- counter += 1
- continue
- data[counter] = ' '.join([data[counter], row])
-
- arrays = dict()
- for value in data.values():
- match = RE_DISKS.search(value)
- if not match:
- continue
-
- match = match.groupdict()
- array = match.pop('array')
- arrays[array] = match
- for operation in OPERATIONS:
- arrays[array][operation] = 0
-
- match = RE_STATUS.search(value)
- if match:
- match = match.groupdict()
- if match['operation'] in OPERATIONS:
- arrays[array]['operation'] = match['operation']
- arrays[array][match['operation']] = float(match['operation_status']) * 100
- arrays[array]['finish'] = float(match['finish']) * 1000 * 60
- arrays[array]['speed'] = float(match['speed']) * 1000
-
- return arrays or None
diff --git a/python.d/powerdns.chart.py b/python.d/powerdns.chart.py
deleted file mode 100644
index a8d2f399c..000000000
--- a/python.d/powerdns.chart.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: powerdns netdata python.d module
-# Author: l2isbad
-
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-priority = 60000
-retries = 60
-# update_every = 3
-
-ORDER = ['questions', 'cache_usage', 'cache_size', 'latency']
-CHARTS = {
- 'questions': {
- 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
- 'lines': [
- ['udp-queries', None, 'incremental'],
- ['udp-answers', None, 'incremental'],
- ['tcp-queries', None, 'incremental'],
- ['tcp-answers', None, 'incremental']
- ]},
- 'cache_usage': {
- 'options': [None, 'PowerDNS Cache Usage', 'count', 'cache', 'powerdns.cache_usage', 'line'],
- 'lines': [
- ['query-cache-hit', None, 'incremental'],
- ['query-cache-miss', None, 'incremental'],
- ['packetcache-hit', 'packet-cache-hit', 'incremental'],
- ['packetcache-miss', 'packet-cache-miss', 'incremental']
- ]},
- 'cache_size': {
- 'options': [None, 'PowerDNS Cache Size', 'count', 'cache', 'powerdns.cache_size', 'line'],
- 'lines': [
- ['query-cache-size', None, 'absolute'],
- ['packetcache-size', 'packet-cache-size', 'absolute'],
- ['key-cache-size', None, 'absolute'],
- ['meta-cache-size', None, 'absolute']
- ]},
- 'latency': {
- 'options': [None, 'PowerDNS Latency', 'microseconds', 'latency', 'powerdns.latency', 'line'],
- 'lines': [
- ['latency', None, 'absolute']
- ]}
-
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- data = self._get_raw_data()
- if not data:
- return None
- return dict((d['name'], d['value']) for d in loads(data))
diff --git a/python.d/python-modules-installer.sh b/python.d/python-modules-installer.sh
deleted file mode 100644
index cda3c6662..000000000
--- a/python.d/python-modules-installer.sh
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env bash
-
-umask 022
-
-dir="/usr/local/libexec/netdata/python.d"
-target="${dir}/python_modules"
-pv="$(python -V 2>&1)"
-
-# parse parameters
-while [ ! -z "${1}" ]
-do
- case "${1}" in
- -p|--python)
- pv="Python ${2}"
- shift 2
- ;;
-
- -d|--dir)
- dir="${2}"
- target="${dir}/python_modules"
- echo >&2 "Will install python modules in: '${target}'"
- shift 2
- ;;
-
- -s|--system)
- target=
- echo >&2 "Will install python modules system-wide"
- shift
- ;;
-
- -h|--help)
- echo "${0} [--dir netdata-python.d-path] [--system]"
- echo "Please make sure you have installed packages: python-pip (or python3-pip) python-dev libyaml-dev libmysqlclient-dev"
- exit 0
- ;;
-
- *)
- echo >&2 "Cannot understand parameter: ${1}"
- exit 1
- ;;
- esac
-done
-
-
-if [ ! -z "${target}" -a ! -d "${target}" ]
-then
- echo >&2 "Cannot find directory: '${target}'"
- exit 1
-fi
-
-if [[ "${pv}" =~ ^Python\ 2.* ]]
-then
- pv=2
- pip="$(which pip2 2>/dev/null)"
-elif [[ "${pv}" =~ ^Python\ 3.* ]]
-then
- pv=3
- pip="$(which pip3 2>/dev/null)"
-else
- echo >&2 "Cannot detect python version. Is python installed?"
- exit 1
-fi
-
-[ -z "${pip}" ] && pip="$(which pip 2>/dev/null)"
-if [ -z "${pip}" ]
-then
- echo >&2 "pip command is required to install python v${pv} modules."
- [ "${pv}" = "2" ] && echo >&2 "Please install python-pip."
- [ "${pv}" = "3" ] && echo >&2 "Please install python3-pip."
- exit 1
-fi
-
-echo >&2 "Working for python version ${pv} (pip command: '${pip}')"
-echo >&2 "Installing netdata python modules in: '${target}'"
-
-run() {
- printf "Running command:\n# "
- printf "%q " "${@}"
- printf "\n"
- "${@}"
-}
-
-# try to install all the python modules given as parameters
-# until the first that succeeds
-failed=""
-installed=""
-errors=0
-pip_install() {
- local ret x msg="${1}"
- shift
-
- echo >&2
- echo >&2
- echo >&2 "Installing one of: ${*}"
-
- for x in "${@}"
- do
- echo >&2
- echo >&2 "attempting to install: ${x}"
- if [ ! -z "${target}" ]
- then
- run "${pip}" install --target "${target}" "${x}"
- ret=$?
- else
- run "${pip}" install "${x}"
- ret=$?
- fi
- [ ${ret} -eq 0 ] && break
- echo >&2 "failed to install: ${x}. ${msg}"
- done
-
- if [ ${ret} -ne 0 ]
- then
- echo >&2
- echo >&2
- echo >&2 "FAILED: could not install any of: ${*}. ${msg}"
- echo >&2
- echo >&2
- errors=$(( errors + 1 ))
- failed="${failed}|${*}"
- else
- echo >&2
- echo >&2
- echo >&2 "SUCCESS: we have: ${x}"
- echo >&2
- echo >&2
- installed="${installed} ${x}"
- fi
- return ${ret}
-}
-
-if [ "${pv}" = "2" ]
-then
- pip_install "is libyaml-dev and python-dev installed?" pyyaml
- pip_install "is libmysqlclient-dev and python-dev installed?" mysqlclient mysql-python pymysql
-else
- pip_install "is libyaml-dev and python-dev installed?" pyyaml
- pip_install "is libmysqlclient-dev and python-dev installed?" mysql-python mysqlclient pymysql
-fi
-
-echo >&2
-echo >&2
-if [ ${errors} -ne 0 ]
-then
- echo >&2 "Failed to install ${errors} modules: ${failed}"
- if [ ! -z "${target}" ]
- then
- echo >&2
- echo >&2 "If you are getting errors during cleanup from pip, there is a known bug"
- echo >&2 "in certain versions of pip that prevents installing packages local to an"
- echo >&2 "application. To install them system-wide please run:"
- echo >&2 "$0 --system"
- fi
- exit 1
-else
- echo >&2 "All done. We have: ${installed}"
- exit 0
-fi
diff --git a/python.d/python_modules/__init__.py b/python.d/python_modules/__init__.py
deleted file mode 100644
index 8d1c8b69c..000000000
--- a/python.d/python_modules/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/python.d/python_modules/base.py b/python.d/python_modules/base.py
deleted file mode 100644
index 7c6e1d2f2..000000000
--- a/python.d/python_modules/base.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: backward compatibility with the old flat module layout
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.FrameworkServices.UrlService import UrlService
-from bases.FrameworkServices.SocketService import SocketService
-from bases.FrameworkServices.LogService import LogService
-from bases.FrameworkServices.ExecutableService import ExecutableService
-from bases.FrameworkServices.MySQLService import MySQLService
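The shim above exists purely so that modules written against the old flat layout keep importing after the framework split; assuming python.d.plugin still puts python_modules on the module search path, a legacy module could read:

```python
# Resolves through the backward-compatibility shim above; equivalent to
# importing bases.FrameworkServices.SimpleService directly.
from base import SimpleService

class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
```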
diff --git a/python.d/redis.chart.py b/python.d/redis.chart.py
deleted file mode 100644
index bcfcf16a6..000000000
--- a/python.d/redis.chart.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: redis netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-
-from bases.FrameworkServices.SocketService import SocketService
-
-# default module values (can be overridden per job in `config`)
-priority = 60000
-retries = 60
-
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority,
-# 'host': 'localhost',
-# 'port': 6379,
-# 'unix_socket': None
-# }}
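A concrete job override takes the same shape as the commented default; for instance, a password-protected instance on a non-default port might look like this (all values below are made up):

```python
# Illustrative job override in the shape of the commented default above;
# the host, port and password are invented values, not defaults.
config = {'remote': {
    'host': '192.0.2.10',
    'port': 6380,
    'pass': 'hunter2',
    'update_every': 5,
}}
```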
-
-ORDER = ['operations', 'hit_rate', 'memory', 'keys', 'net', 'connections', 'clients', 'slaves', 'persistence',
- 'bgsave_now', 'bgsave_health']
-
-CHARTS = {
- 'operations': {
- 'options': [None, 'Redis Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
- 'lines': [
- ['total_commands_processed', 'commands', 'incremental'],
- ['instantaneous_ops_per_sec', 'operations', 'absolute']
- ]},
- 'hit_rate': {
- 'options': [None, 'Redis Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'],
- 'lines': [
- ['hit_rate', 'rate', 'absolute']
- ]},
- 'memory': {
- 'options': [None, 'Redis Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'],
- 'lines': [
- ['used_memory', 'total', 'absolute', 1, 1024],
- ['used_memory_lua', 'lua', 'absolute', 1, 1024]
- ]},
- 'net': {
- 'options': [None, 'Redis Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
- 'lines': [
- ['total_net_input_bytes', 'in', 'incremental', 8, 1024],
- ['total_net_output_bytes', 'out', 'incremental', -8, 1024]
- ]},
- 'keys': {
- 'options': [None, 'Redis Keys per Database', 'keys', 'keys', 'redis.keys', 'line'],
- 'lines': [
- # lines are created dynamically in `check()` method
- ]},
- 'connections': {
- 'options': [None, 'Redis Connections', 'connections/s', 'connections', 'redis.connections', 'line'],
- 'lines': [
- ['total_connections_received', 'received', 'incremental', 1],
- ['rejected_connections', 'rejected', 'incremental', -1]
- ]},
- 'clients': {
- 'options': [None, 'Redis Clients', 'clients', 'connections', 'redis.clients', 'line'],
- 'lines': [
- ['connected_clients', 'connected', 'absolute', 1],
- ['blocked_clients', 'blocked', 'absolute', -1]
- ]},
- 'slaves': {
- 'options': [None, 'Redis Slaves', 'slaves', 'replication', 'redis.slaves', 'line'],
- 'lines': [
- ['connected_slaves', 'connected', 'absolute']
- ]},
- 'persistence': {
- 'options': [None, 'Redis Persistence Changes Since Last Save', 'changes', 'persistence',
- 'redis.rdb_changes', 'line'],
- 'lines': [
- ['rdb_changes_since_last_save', 'changes', 'absolute']
- ]},
- 'bgsave_now': {
- 'options': [None, 'Duration of the RDB Save Operation', 'seconds', 'persistence',
- 'redis.bgsave_now', 'absolute'],
- 'lines': [
- ['rdb_bgsave_in_progress', 'rdb save', 'absolute']
- ]},
- 'bgsave_health': {
- 'options': [None, 'Status of the Last RDB Save Operation', 'status', 'persistence',
- 'redis.bgsave_health', 'line'],
- 'lines': [
- ['rdb_last_bgsave_status', 'rdb save', 'absolute']
- ]}
-}
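Each entry under 'lines' follows the python.d dimension convention [id, name, algorithm, multiplier, divisor]; the keys chart starts empty because its dimensions depend on which databases the server reports. After check() runs against a server exposing db0 and db1, for example, it would effectively hold:

```python
# What check() appends for a server reporting two databases (illustrative);
# a dimension name of None falls back to the id.
CHARTS['keys']['lines'] = [
    ['db0', None, 'absolute'],
    ['db1', None, 'absolute'],
]
```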
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self._keep_alive = True
- self.chart_name = ""
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 6379)
- self.unix_socket = self.configuration.get('socket')
- password = self.configuration.get('pass', str())
- self.bgsave_time = 0
- self.requests = dict(request='INFO\r\n'.encode(),
- password=' '.join(['AUTH', password, '\r\n']).encode() if password else None)
- self.request = self.requests['request']
-
- def _get_data(self):
- """
- Get data from socket
- :return: dict
- """
- if self.requests['password']:
- self.request = self.requests['password']
- raw = self._get_raw_data().strip()
- if raw != "+OK":
- self.error("invalid password")
- return None
- self.request = self.requests['request']
- response = self._get_raw_data()
- if response is None:
- # error has already been logged
- return None
-
- try:
- parsed = response.split("\n")
- except AttributeError:
- self.error("response is invalid/empty")
- return None
-
- data = dict()
- for line in parsed:
- if len(line) < 5 or line[0] == '$' or line[0] == '#':
- continue
-
- if line.startswith('db'):
- tmp = line.split(',')[0].replace('keys=', '')
- record = tmp.split(':')
- data[record[0]] = record[1]
- continue
-
- try:
- t = line.split(':')
- data[t[0]] = t[1]
- except (IndexError, ValueError):
- self.debug("invalid line received: " + str(line))
-
- if not data:
- self.error("received data doesn't have any records")
- return None
-
- try:
- data['hit_rate'] = (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits'])
- + int(data['keyspace_misses']))
- except (KeyError, ZeroDivisionError, TypeError):
- data['hit_rate'] = 0
-
- if data['rdb_bgsave_in_progress'] != '0\r':
- self.bgsave_time += self.update_every
- else:
- self.bgsave_time = 0
-
- data['rdb_last_bgsave_status'] = 0 if data['rdb_last_bgsave_status'] == 'ok\r' else 1
- data['rdb_bgsave_in_progress'] = self.bgsave_time
-
- return data
-
- def _check_raw_data(self, data):
- """
- Check if all data has been gathered from socket.
- Parse the first line, which contains the declared message length, and check it against the data received so far
- :param data: str
- :return: boolean
- """
- length = len(data)
- supposed = data.split('\n')[0][1:-1]
- offset = len(supposed) + 4 # 1 dollar sign + the '\n' after the length + the trailing '\r\n'
- if not supposed.isdigit():
- return True
- supposed = int(supposed)
-
- if length - offset >= supposed:
- self.debug("received full response from redis")
- return True
-
- self.debug("waiting more data from redis")
- return False
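The offset arithmetic follows RESP bulk-string framing: for a reply such as `$5\r\nhello\r\n` the advertised length is 5, and the code counts 4 bytes of overhead — the `$`, the newline after the length line, and the trailing `\r\n`. A standalone restatement of the same check:

```python
# Standalone restatement of _check_raw_data() for a RESP bulk-string reply.
def is_complete(data):
    """Return True once the buffer covers the advertised bulk length."""
    prefix = data.split('\n')[0][1:-1]  # strip the '$' and the trailing '\r'
    if not prefix.isdigit():            # not a bulk string; accept as-is
        return True
    return len(data) - (len(prefix) + 4) >= int(prefix)

assert is_complete('$5\r\nhello\r\n')  # full reply buffered
assert not is_complete('$5\r\nhel')    # still waiting for data
```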
-
- def check(self):
- """
- Parse configuration, check that redis is reachable, and dynamically create the keys chart lines
- :return: boolean
- """
- data = self._get_data()
- if data is None:
- return False
-
- for name in data:
- if name.startswith('db'):
- self.definitions['keys']['lines'].append([name, None, 'absolute'])
- return True
diff --git a/python.d/samba.chart.py b/python.d/samba.chart.py
deleted file mode 100644
index 3f4fd5a12..000000000
--- a/python.d/samba.chart.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: samba netdata python.d module
-# Author: Christopher Cox <chris_cox@endlessnow.com>
-#
-# The netdata user needs to be able to run the smbstatus program via sudo
-# without a password:
-# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
-#
-# This makes calls to smbstatus -P
-#
-# This looks at just a few counters from the syscall section, and some from smb2.
-#
-# The Lesser Ops chart is simply a display of the current counter values; they
-# didn't seem to change much in testing. If you notice a counter there changing
-# a lot, bring it out into its own chart and make it incremental (find and
-# notify are good examples).
-
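A quick way to confirm the sudoers entry works for the netdata account is to run the exact command non-interactively, as check() effectively does; a tiny probe along those lines (binary paths are assumptions, adjust for your system):

```python
# Probe roughly equivalent to the module's check(): run smbstatus via sudo
# with -n (never prompt). The paths below are assumptions for illustration.
import subprocess

rc = subprocess.call(['sudo', '-n', '/usr/bin/smbstatus', '-P'])
print('sudoers entry OK' if rc == 0 else 'sudo refused or smbstatus failed')
```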
-import re
-
-from bases.collection import find_binary
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-# default module values (can be overridden per job in `config`)
-update_every = 5
-priority = 60000
-retries = 60
-
-ORDER = ['syscall_rw', 'smb2_rw', 'smb2_create_close', 'smb2_info', 'smb2_find', 'smb2_notify', 'smb2_sm_count']
-
-CHARTS = {
- 'syscall_rw': {
- 'lines': [
- ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
- ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
- ],
- 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area']
- },
- 'smb2_rw': {
- 'lines': [
- ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
- ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
- ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
- ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
- ],
- 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area']
- },
- 'smb2_create_close': {
- 'lines': [
- ['smb2_create_count', 'create', 'incremental', 1, 1],
- ['smb2_close_count', 'close', 'incremental', -1, 1]
- ],
- 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line']
- },
- 'smb2_info': {
- 'lines': [
- ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
- ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
- ],
- 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line']
- },
- 'smb2_find': {
- 'lines': [
- ['smb2_find_count', 'find', 'incremental', 1, 1]
- ],
- 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line']
- },
- 'smb2_notify': {
- 'lines': [
- ['smb2_notify_count', 'notify', 'incremental', 1, 1]
- ],
- 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line']
- },
- 'smb2_sm_count': {
- 'lines': [
- ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
- ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
- ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
- ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
- ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
- ['smb2_flush_count', 'flush', 'absolute', 1, 1],
- ['smb2_lock_count', 'lock', 'absolute', 1, 1],
- ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
- ['smb2_break_count', 'break', 'absolute', 1, 1],
- ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
- ],
- 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked']
- }
- }
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
-
- def check(self):
- sudo_binary, smbstatus_binary = find_binary('sudo'), find_binary('smbstatus')
-
- if not (sudo_binary and smbstatus_binary):
- self.error("Can\'t locate 'sudo' or 'smbstatus' binary")
- return False
-
- self.command = [sudo_binary, '-v']
- err = self._get_raw_data(stderr=True)
- if err:
- self.error(''.join(err))
- return False
-
- self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
-
- return ExecutableService.check(self)
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- parsed = self.rgx_smb2.findall(' '.join(raw_data))
-
- return dict(parsed) or None
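For reference, the regex in _get_data() lifts `name: value` counter pairs out of the profiling output (assuming the raw lines keep their trailing newlines, the greedy `.*` stays within a single counter line). A round-trip on fabricated `smbstatus -P` output, with invented values:

```python
# Round-trip of the parsing in _get_data(); the counter values are invented.
import re

rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')

sample = '\n'.join([
    'syscall_sendfile_bytes:   1048576',
    'syscall_recvfile_bytes:   524288',
    'smb2_read_inbytes:        2048',
    'smb2_find_count:          7',
])

print(dict(rgx_smb2.findall(sample)))
# {'syscall_sendfile_bytes': '1048576', 'syscall_recvfile_bytes': '524288',
#  'smb2_read_inbytes': '2048', 'smb2_find_count': '7'}
```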