path: root/collectors/python.d.plugin
Diffstat
-rw-r--r--  collectors/python.d.plugin/Makefile.am (renamed from python.d/Makefile.am) | 150
-rw-r--r--  collectors/python.d.plugin/Makefile.in | 1987
-rw-r--r--  collectors/python.d.plugin/README.md | 198
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/README.md | 46
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py | 247
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf | 55
-rw-r--r--  collectors/python.d.plugin/apache/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/apache/README.md | 59
-rw-r--r--  collectors/python.d.plugin/apache/apache.chart.py (renamed from python.d/apache.chart.py) | 57
-rw-r--r--  collectors/python.d.plugin/apache/apache.conf (renamed from conf.d/python.d/apache.conf) | 2
-rw-r--r--  collectors/python.d.plugin/beanstalk/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/beanstalk/README.md | 103
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.chart.py (renamed from python.d/beanstalk.chart.py) | 43
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.conf (renamed from conf.d/python.d/beanstalk.conf) | 0
-rw-r--r--  collectors/python.d.plugin/bind_rndc/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/bind_rndc/README.md | 60
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py (renamed from python.d/bind_rndc.chart.py) | 77
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.conf (renamed from conf.d/python.d/bind_rndc.conf) | 0
-rw-r--r--  collectors/python.d.plugin/boinc/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/boinc/README.md | 28
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.chart.py | 162
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.conf | 68
-rw-r--r--  collectors/python.d.plugin/ceph/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/ceph/README.md | 32
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.chart.py (renamed from python.d/ceph.chart.py) | 86
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.conf (renamed from conf.d/python.d/ceph.conf) | 0
-rw-r--r--  collectors/python.d.plugin/chrony/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/chrony/README.md | 31
-rw-r--r--  collectors/python.d.plugin/chrony/chrony.chart.py (renamed from python.d/chrony.chart.py) | 69
-rw-r--r--  collectors/python.d.plugin/chrony/chrony.conf (renamed from conf.d/python.d/chrony.conf) | 0
-rw-r--r--  collectors/python.d.plugin/couchdb/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/couchdb/README.md | 35
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.chart.py (renamed from python.d/couchdb.chart.py) | 3
-rw-r--r--  collectors/python.d.plugin/couchdb/couchdb.conf (renamed from conf.d/python.d/couchdb.conf) | 0
-rw-r--r--  collectors/python.d.plugin/cpufreq/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/cpufreq/README.md | 30
-rw-r--r--  collectors/python.d.plugin/cpufreq/cpufreq.chart.py (renamed from python.d/cpufreq.chart.py) | 12
-rw-r--r--  collectors/python.d.plugin/cpufreq/cpufreq.conf (renamed from conf.d/python.d/cpufreq.conf) | 0
-rw-r--r--  collectors/python.d.plugin/cpuidle/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/cpuidle/README.md | 11
-rw-r--r--  collectors/python.d.plugin/cpuidle/cpuidle.chart.py (renamed from python.d/cpuidle.chart.py) | 8
-rw-r--r--  collectors/python.d.plugin/cpuidle/cpuidle.conf | 40
-rw-r--r--  collectors/python.d.plugin/dns_query_time/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/dns_query_time/README.md | 10
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py (renamed from python.d/dns_query_time.chart.py) | 27
-rw-r--r--  collectors/python.d.plugin/dns_query_time/dns_query_time.conf (renamed from conf.d/python.d/dns_query_time.conf) | 0
-rw-r--r--  collectors/python.d.plugin/dnsdist/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/dnsdist/README.md | 54
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.chart.py | 133
-rw-r--r--  collectors/python.d.plugin/dnsdist/dnsdist.conf (renamed from conf.d/python.d/dnsdist.conf) | 0
-rw-r--r--  collectors/python.d.plugin/dockerd/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/dockerd/README.md | 26
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.chart.py | 77
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.conf | 79
-rw-r--r--  collectors/python.d.plugin/dovecot/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/dovecot/README.md | 73
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.chart.py (renamed from python.d/dovecot.chart.py) | 87
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.conf (renamed from conf.d/python.d/dovecot.conf) | 0
-rw-r--r--  collectors/python.d.plugin/elasticsearch/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/elasticsearch/README.md | 60
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py (renamed from python.d/elasticsearch.chart.py) | 220
-rw-r--r--  collectors/python.d.plugin/elasticsearch/elasticsearch.conf (renamed from conf.d/python.d/elasticsearch.conf) | 0
-rw-r--r--  collectors/python.d.plugin/example/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/example/README.md | 1
-rw-r--r--  collectors/python.d.plugin/example/example.chart.py (renamed from python.d/example.chart.py) | 3
-rw-r--r--  collectors/python.d.plugin/example/example.conf (renamed from conf.d/python.d/example.conf) | 0
-rw-r--r--  collectors/python.d.plugin/exim/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/exim/README.md | 13
-rw-r--r--  collectors/python.d.plugin/exim/exim.chart.py (renamed from python.d/exim.chart.py) | 8
-rw-r--r--  collectors/python.d.plugin/exim/exim.conf (renamed from conf.d/python.d/exim.conf) | 0
-rw-r--r--  collectors/python.d.plugin/fail2ban/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/fail2ban/README.md | 23
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.chart.py | 196
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.conf (renamed from conf.d/python.d/fail2ban.conf) | 0
-rw-r--r--  collectors/python.d.plugin/freeradius/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/freeradius/README.md | 70
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.chart.py (renamed from python.d/freeradius.chart.py) | 24
-rw-r--r--  collectors/python.d.plugin/freeradius/freeradius.conf (renamed from conf.d/python.d/freeradius.conf) | 0
-rw-r--r--  collectors/python.d.plugin/go_expvar/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/go_expvar/README.md | 276
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.chart.py (renamed from python.d/go_expvar.chart.py) | 22
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.conf (renamed from conf.d/python.d/go_expvar.conf) | 2
-rw-r--r--  collectors/python.d.plugin/haproxy/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/haproxy/README.md | 49
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.chart.py (renamed from python.d/haproxy.chart.py) | 243
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.conf (renamed from conf.d/python.d/haproxy.conf) | 0
-rw-r--r--  collectors/python.d.plugin/hddtemp/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/hddtemp/README.md | 22
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.chart.py | 100
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.conf (renamed from conf.d/python.d/hddtemp.conf) | 0
-rw-r--r--  collectors/python.d.plugin/httpcheck/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/httpcheck/README.md | 41
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.chart.py (renamed from python.d/httpcheck.chart.py) | 18
-rw-r--r--  collectors/python.d.plugin/httpcheck/httpcheck.conf (renamed from conf.d/python.d/httpcheck.conf) | 1
-rw-r--r--  collectors/python.d.plugin/icecast/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/icecast/README.md | 26
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.chart.py (renamed from python.d/icecast.chart.py) | 15
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.conf (renamed from conf.d/python.d/icecast.conf) | 0
-rw-r--r--  collectors/python.d.plugin/ipfs/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/ipfs/README.md | 25
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.chart.py (renamed from python.d/ipfs.chart.py) | 60
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.conf (renamed from conf.d/python.d/ipfs.conf) | 9
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/README.md | 34
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py (renamed from python.d/isc_dhcpd.chart.py) | 20
-rw-r--r--  collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf (renamed from conf.d/python.d/isc_dhcpd.conf) | 0
-rw-r--r--  collectors/python.d.plugin/linux_power_supply/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/linux_power_supply/README.md | 67
-rw-r--r--  collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py | 160
-rw-r--r--  collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf | 81
-rw-r--r--  collectors/python.d.plugin/litespeed/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/litespeed/README.md | 47
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.chart.py | 186
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.conf | 74
-rw-r--r--  collectors/python.d.plugin/logind/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/logind/README.md | 54
-rw-r--r--  collectors/python.d.plugin/logind/logind.chart.py | 79
-rw-r--r--  collectors/python.d.plugin/logind/logind.conf | 62
-rw-r--r--  collectors/python.d.plugin/mdstat/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/mdstat/README.md | 26
-rw-r--r--  collectors/python.d.plugin/mdstat/mdstat.chart.py | 205
-rw-r--r--  collectors/python.d.plugin/mdstat/mdstat.conf (renamed from conf.d/python.d/mdstat.conf) | 0
-rw-r--r--  collectors/python.d.plugin/megacli/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/megacli/README.md | 48
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.chart.py | 279
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.conf | 62
-rw-r--r--  collectors/python.d.plugin/memcached/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/memcached/README.md | 69
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.chart.py (renamed from python.d/memcached.chart.py) | 61
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.conf (renamed from conf.d/python.d/memcached.conf) | 0
-rw-r--r--  collectors/python.d.plugin/mongodb/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/mongodb/README.md | 141
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.chart.py (renamed from python.d/mongodb.chart.py) | 150
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.conf (renamed from conf.d/python.d/mongodb.conf) | 0
-rw-r--r--  collectors/python.d.plugin/monit/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/monit/README.md | 33
-rw-r--r--  collectors/python.d.plugin/monit/monit.chart.py | 166
-rw-r--r--  collectors/python.d.plugin/monit/monit.conf | 88
-rw-r--r--  collectors/python.d.plugin/mysql/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/mysql/README.md | 90
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.chart.py (renamed from python.d/mysql.chart.py) | 451
-rw-r--r--  collectors/python.d.plugin/mysql/mysql.conf (renamed from conf.d/python.d/mysql.conf) | 0
-rw-r--r--  collectors/python.d.plugin/nginx/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/nginx/README.md | 45
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.chart.py (renamed from python.d/nginx.chart.py) | 27
-rw-r--r--  collectors/python.d.plugin/nginx/nginx.conf (renamed from conf.d/python.d/nginx.conf) | 0
-rw-r--r--  collectors/python.d.plugin/nginx_plus/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/nginx_plus/README.md | 125
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py (renamed from python.d/nginx_plus.chart.py) | 107
-rw-r--r--  collectors/python.d.plugin/nginx_plus/nginx_plus.conf (renamed from conf.d/python.d/nginx_plus.conf) | 0
-rw-r--r--  collectors/python.d.plugin/nsd/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/nsd/README.md | 54
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.chart.py (renamed from python.d/nsd.chart.py) | 45
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.conf (renamed from conf.d/python.d/nsd.conf) | 0
-rw-r--r--  collectors/python.d.plugin/ntpd/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/ntpd/README.md | 71
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.chart.py (renamed from python.d/ntpd.chart.py) | 80
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.conf (renamed from conf.d/python.d/ntpd.conf) | 0
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/README.md | 32
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py (renamed from python.d/ovpn_status_log.chart.py) | 27
-rw-r--r--  collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf (renamed from conf.d/python.d/ovpn_status_log.conf) | 6
-rw-r--r--  collectors/python.d.plugin/phpfpm/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/phpfpm/README.md | 40
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.chart.py (renamed from python.d/phpfpm.chart.py) | 20
-rw-r--r--  collectors/python.d.plugin/phpfpm/phpfpm.conf (renamed from conf.d/python.d/phpfpm.conf) | 2
-rw-r--r--  collectors/python.d.plugin/portcheck/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/portcheck/README.md | 35
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.chart.py (renamed from python.d/portcheck.chart.py) | 12
-rw-r--r--  collectors/python.d.plugin/portcheck/portcheck.conf (renamed from conf.d/python.d/portcheck.conf) | 0
-rw-r--r--  collectors/python.d.plugin/postfix/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/postfix/README.md | 15
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.chart.py (renamed from python.d/postfix.chart.py) | 15
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.conf (renamed from conf.d/python.d/postfix.conf) | 0
-rw-r--r--  collectors/python.d.plugin/postgres/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/postgres/README.md | 68
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.chart.py (renamed from python.d/postgres.chart.py) | 593
-rw-r--r--  collectors/python.d.plugin/postgres/postgres.conf (renamed from conf.d/python.d/postgres.conf) | 0
-rw-r--r--  collectors/python.d.plugin/powerdns/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/powerdns/README.md | 77
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.chart.py | 150
-rw-r--r--  collectors/python.d.plugin/powerdns/powerdns.conf (renamed from conf.d/python.d/powerdns.conf) | 0
-rw-r--r--  collectors/python.d.plugin/proxysql/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/proxysql/README.md | 62
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.chart.py | 356
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.conf | 118
-rw-r--r--  collectors/python.d.plugin/puppet/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/puppet/README.md | 48
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.chart.py | 121
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.conf | 98
-rw-r--r--  collectors/python.d.plugin/python.d.conf (renamed from conf.d/python.d.conf) | 30
-rw-r--r-- [-rwxr-xr-x]  collectors/python.d.plugin/python.d.plugin (renamed from plugins.d/python.d.plugin) | 121
-rwxr-xr-x  collectors/python.d.plugin/python.d.plugin.in | 427
-rw-r--r--  collectors/python.d.plugin/python_modules/__init__.py (renamed from python.d/python_modules/third_party/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py (renamed from python.d/python_modules/bases/FrameworkServices/ExecutableService.py) | 9
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py (renamed from python.d/python_modules/bases/FrameworkServices/LogService.py) | 2
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py (renamed from python.d/python_modules/bases/FrameworkServices/MySQLService.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py (renamed from python.d/python_modules/bases/FrameworkServices/SimpleService.py) | 13
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py (renamed from python.d/python_modules/bases/FrameworkServices/SocketService.py) | 64
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py (renamed from python.d/python_modules/bases/FrameworkServices/UrlService.py) | 32
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py (renamed from python.d/python_modules/urllib3/contrib/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/__init__.py (renamed from python.d/python_modules/urllib3/contrib/_securetransport/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/charts.py (renamed from python.d/python_modules/bases/charts.py) | 18
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/collection.py (renamed from python.d/python_modules/bases/collection.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loaders.py (renamed from python.d/python_modules/bases/loaders.py) | 21
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loggers.py (renamed from python.d/python_modules/bases/loggers.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/__init__.py (renamed from python.d/python_modules/pyyaml2/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/composer.py (renamed from python.d/python_modules/pyyaml2/composer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/constructor.py (renamed from python.d/python_modules/pyyaml2/constructor.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py (renamed from python.d/python_modules/pyyaml2/cyaml.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/dumper.py (renamed from python.d/python_modules/pyyaml2/dumper.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/emitter.py (renamed from python.d/python_modules/pyyaml2/emitter.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/error.py (renamed from python.d/python_modules/pyyaml2/error.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/events.py (renamed from python.d/python_modules/pyyaml2/events.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/loader.py (renamed from python.d/python_modules/pyyaml2/loader.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/nodes.py (renamed from python.d/python_modules/pyyaml2/nodes.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/parser.py (renamed from python.d/python_modules/pyyaml2/parser.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/reader.py (renamed from python.d/python_modules/pyyaml2/reader.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/representer.py (renamed from python.d/python_modules/pyyaml2/representer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/resolver.py (renamed from python.d/python_modules/pyyaml2/resolver.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/scanner.py (renamed from python.d/python_modules/pyyaml2/scanner.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/serializer.py (renamed from python.d/python_modules/pyyaml2/serializer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/tokens.py (renamed from python.d/python_modules/pyyaml2/tokens.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/__init__.py (renamed from python.d/python_modules/pyyaml3/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/composer.py (renamed from python.d/python_modules/pyyaml3/composer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/constructor.py (renamed from python.d/python_modules/pyyaml3/constructor.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py (renamed from python.d/python_modules/pyyaml3/cyaml.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/dumper.py (renamed from python.d/python_modules/pyyaml3/dumper.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/emitter.py (renamed from python.d/python_modules/pyyaml3/emitter.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/error.py (renamed from python.d/python_modules/pyyaml3/error.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/events.py (renamed from python.d/python_modules/pyyaml3/events.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/loader.py (renamed from python.d/python_modules/pyyaml3/loader.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/nodes.py (renamed from python.d/python_modules/pyyaml3/nodes.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/parser.py (renamed from python.d/python_modules/pyyaml3/parser.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/reader.py (renamed from python.d/python_modules/pyyaml3/reader.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/representer.py (renamed from python.d/python_modules/pyyaml3/representer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/resolver.py (renamed from python.d/python_modules/pyyaml3/resolver.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/scanner.py (renamed from python.d/python_modules/pyyaml3/scanner.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/serializer.py (renamed from python.d/python_modules/pyyaml3/serializer.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/tokens.py (renamed from python.d/python_modules/pyyaml3/tokens.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/__init__.py (renamed from python.d/python_modules/urllib3/packages/backports/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/boinc_client.py | 515
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/lm_sensors.py (renamed from python.d/python_modules/third_party/lm_sensors.py) | 3
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/mcrcon.py | 74
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/monotonic.py | 171
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/ordereddict.py (renamed from python.d/python_modules/third_party/ordereddict.py) | 20
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/__init__.py (renamed from python.d/python_modules/urllib3/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/_collections.py (renamed from python.d/python_modules/urllib3/_collections.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/connection.py (renamed from python.d/python_modules/urllib3/connection.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/connectionpool.py (renamed from python.d/python_modules/urllib3/connectionpool.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py (renamed from src/.keep) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py (renamed from python.d/python_modules/urllib3/contrib/_securetransport/bindings.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py (renamed from python.d/python_modules/urllib3/contrib/_securetransport/low_level.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py (renamed from python.d/python_modules/urllib3/contrib/appengine.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py (renamed from python.d/python_modules/urllib3/contrib/ntlmpool.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py (renamed from python.d/python_modules/urllib3/contrib/pyopenssl.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py (renamed from python.d/python_modules/urllib3/contrib/securetransport.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py (renamed from python.d/python_modules/urllib3/contrib/socks.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/exceptions.py (renamed from python.d/python_modules/urllib3/exceptions.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/fields.py (renamed from python.d/python_modules/urllib3/fields.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/filepost.py (renamed from python.d/python_modules/urllib3/filepost.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py (renamed from python.d/python_modules/urllib3/packages/__init__.py) | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py | 0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py (renamed from python.d/python_modules/urllib3/packages/backports/makefile.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py (renamed from python.d/python_modules/urllib3/packages/ordered_dict.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/six.py (renamed from python.d/python_modules/urllib3/packages/six.py) | 18
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py (renamed from python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py (renamed from python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py) | 3
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/poolmanager.py (renamed from python.d/python_modules/urllib3/poolmanager.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/request.py (renamed from python.d/python_modules/urllib3/request.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/response.py (renamed from python.d/python_modules/urllib3/response.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/__init__.py (renamed from python.d/python_modules/urllib3/util/__init__.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/connection.py (renamed from python.d/python_modules/urllib3/util/connection.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/request.py (renamed from python.d/python_modules/urllib3/util/request.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/response.py (renamed from python.d/python_modules/urllib3/util/response.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/retry.py (renamed from python.d/python_modules/urllib3/util/retry.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/selectors.py (renamed from python.d/python_modules/urllib3/util/selectors.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py (renamed from python.d/python_modules/urllib3/util/ssl_.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/timeout.py (renamed from python.d/python_modules/urllib3/util/timeout.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/url.py (renamed from python.d/python_modules/urllib3/util/url.py) | 1
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/wait.py (renamed from python.d/python_modules/urllib3/util/wait.py) | 1
-rw-r--r--  collectors/python.d.plugin/rabbitmq/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/rabbitmq/README.md | 56
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py (renamed from python.d/rabbitmq.chart.py) | 112
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.conf (renamed from conf.d/python.d/rabbitmq.conf) | 0
-rw-r--r--  collectors/python.d.plugin/redis/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/redis/README.md | 42
-rw-r--r--  collectors/python.d.plugin/redis/redis.chart.py | 261
-rw-r--r--  collectors/python.d.plugin/redis/redis.conf (renamed from conf.d/python.d/redis.conf) | 0
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/README.md | 34
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py | 235
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf | 78
-rw-r--r--  collectors/python.d.plugin/retroshare/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/retroshare/README.md | 1
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.chart.py (renamed from python.d/retroshare.chart.py) | 10
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.conf (renamed from conf.d/python.d/retroshare.conf) | 0
-rw-r--r--  collectors/python.d.plugin/samba/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/samba/README.md | 67
-rw-r--r--  collectors/python.d.plugin/samba/samba.chart.py | 138
-rw-r--r--  collectors/python.d.plugin/samba/samba.conf (renamed from conf.d/python.d/samba.conf) | 0
-rw-r--r--  collectors/python.d.plugin/sensors/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/sensors/README.md | 17
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.chart.py (renamed from python.d/sensors.chart.py) | 29
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.conf (renamed from conf.d/python.d/sensors.conf) | 0
-rw-r--r--  collectors/python.d.plugin/smartd_log/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/smartd_log/README.md | 38
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.chart.py (renamed from python.d/smartd_log.chart.py) | 9
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.conf (renamed from conf.d/python.d/smartd_log.conf) | 0
-rw-r--r--  collectors/python.d.plugin/spigotmc/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/spigotmc/README.md | 22
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.chart.py | 120
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.conf | 68
-rw-r--r--  collectors/python.d.plugin/springboot/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/springboot/README.md | 129
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.chart.py (renamed from python.d/springboot.chart.py) | 96
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.conf (renamed from conf.d/python.d/springboot.conf) | 0
-rw-r--r--  collectors/python.d.plugin/squid/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/squid/README.md | 38
-rw-r--r--  collectors/python.d.plugin/squid/squid.chart.py (renamed from python.d/squid.chart.py) | 75
-rw-r--r--  collectors/python.d.plugin/squid/squid.conf (renamed from conf.d/python.d/squid.conf) | 0
-rw-r--r--  collectors/python.d.plugin/tomcat/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/tomcat/README.md | 33
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.chart.py (renamed from python.d/tomcat.chart.py) | 110
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.conf (renamed from conf.d/python.d/tomcat.conf) | 0
-rw-r--r--  collectors/python.d.plugin/traefik/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/traefik/README.md | 54
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.chart.py (renamed from python.d/traefik.chart.py) | 40
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.conf (renamed from conf.d/python.d/traefik.conf) | 0
-rw-r--r--  collectors/python.d.plugin/unbound/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/unbound/README.md | 76
-rw-r--r--  collectors/python.d.plugin/unbound/unbound.chart.py | 275
-rw-r--r--  collectors/python.d.plugin/unbound/unbound.conf | 87
-rw-r--r--  collectors/python.d.plugin/uwsgi/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/uwsgi/README.md | 37
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.chart.py | 183
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.conf | 94
-rw-r--r--  collectors/python.d.plugin/varnish/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/varnish/README.md | 69
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.chart.py (renamed from python.d/varnish.chart.py) | 29
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.conf (renamed from conf.d/python.d/varnish.conf) | 0
-rw-r--r--  collectors/python.d.plugin/w1sensor/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/w1sensor/README.md | 13
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.chart.py | 93
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.conf | 74
-rw-r--r--  collectors/python.d.plugin/web_log/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/web_log/README.md | 64
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.chart.py (renamed from python.d/web_log.chart.py) | 359
-rw-r--r--  collectors/python.d.plugin/web_log/web_log.conf (renamed from conf.d/python.d/web_log.conf) | 13
351 files changed, 15261 insertions, 1531 deletions
diff --git a/python.d/Makefile.am b/collectors/python.d.plugin/Makefile.am
index a5fcc7394..5f214e436 100644
--- a/python.d/Makefile.am
+++ b/collectors/python.d.plugin/Makefile.am
@@ -1,73 +1,110 @@
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
CLEANFILES = \
- python-modules-installer.sh \
+ python.d.plugin \
$(NULL)
include $(top_srcdir)/build/subst.inc
-
SUFFIXES = .in
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ python.d.plugin.in \
+ README.md \
+ $(NULL)
+
dist_python_SCRIPTS = \
- python-modules-installer.sh \
$(NULL)
dist_python_DATA = \
- README.md \
- apache.chart.py \
- beanstalk.chart.py \
- bind_rndc.chart.py \
- ceph.chart.py \
- chrony.chart.py \
- couchdb.chart.py \
- cpufreq.chart.py \
- cpuidle.chart.py \
- dns_query_time.chart.py \
- dnsdist.chart.py \
- dovecot.chart.py \
- elasticsearch.chart.py \
- example.chart.py \
- exim.chart.py \
- fail2ban.chart.py \
- freeradius.chart.py \
- go_expvar.chart.py \
- haproxy.chart.py \
- hddtemp.chart.py \
- httpcheck.chart.py \
- icecast.chart.py \
- ipfs.chart.py \
- isc_dhcpd.chart.py \
- mdstat.chart.py \
- memcached.chart.py \
- mongodb.chart.py \
- mysql.chart.py \
- nginx.chart.py \
- nginx_plus.chart.py \
- nsd.chart.py \
- ntpd.chart.py \
- ovpn_status_log.chart.py \
- phpfpm.chart.py \
- portcheck.chart.py \
- postfix.chart.py \
- postgres.chart.py \
- powerdns.chart.py \
- rabbitmq.chart.py \
- redis.chart.py \
- retroshare.chart.py \
- samba.chart.py \
- sensors.chart.py \
- springboot.chart.py \
- squid.chart.py \
- smartd_log.chart.py \
- tomcat.chart.py \
- traefik.chart.py \
- varnish.chart.py \
- web_log.chart.py \
$(NULL)
+userpythonconfigdir=$(configdir)/python.d
+dist_userpythonconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+pythonconfigdir=$(libconfigdir)/python.d
+dist_pythonconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+include adaptec_raid/Makefile.inc
+include apache/Makefile.inc
+include beanstalk/Makefile.inc
+include bind_rndc/Makefile.inc
+include boinc/Makefile.inc
+include ceph/Makefile.inc
+include chrony/Makefile.inc
+include couchdb/Makefile.inc
+include cpufreq/Makefile.inc
+include cpuidle/Makefile.inc
+include dnsdist/Makefile.inc
+include dns_query_time/Makefile.inc
+include dockerd/Makefile.inc
+include dovecot/Makefile.inc
+include elasticsearch/Makefile.inc
+include example/Makefile.inc
+include exim/Makefile.inc
+include fail2ban/Makefile.inc
+include freeradius/Makefile.inc
+include go_expvar/Makefile.inc
+include haproxy/Makefile.inc
+include hddtemp/Makefile.inc
+include httpcheck/Makefile.inc
+include icecast/Makefile.inc
+include ipfs/Makefile.inc
+include isc_dhcpd/Makefile.inc
+include linux_power_supply/Makefile.inc
+include litespeed/Makefile.inc
+include logind/Makefile.inc
+include mdstat/Makefile.inc
+include megacli/Makefile.inc
+include memcached/Makefile.inc
+include mongodb/Makefile.inc
+include monit/Makefile.inc
+include mysql/Makefile.inc
+include nginx/Makefile.inc
+include nginx_plus/Makefile.inc
+include nsd/Makefile.inc
+include ntpd/Makefile.inc
+include ovpn_status_log/Makefile.inc
+include phpfpm/Makefile.inc
+include portcheck/Makefile.inc
+include postfix/Makefile.inc
+include postgres/Makefile.inc
+include powerdns/Makefile.inc
+include proxysql/Makefile.inc
+include puppet/Makefile.inc
+include rabbitmq/Makefile.inc
+include redis/Makefile.inc
+include rethinkdbs/Makefile.inc
+include retroshare/Makefile.inc
+include samba/Makefile.inc
+include sensors/Makefile.inc
+include smartd_log/Makefile.inc
+include spigotmc/Makefile.inc
+include springboot/Makefile.inc
+include squid/Makefile.inc
+include tomcat/Makefile.inc
+include traefik/Makefile.inc
+include unbound/Makefile.inc
+include uwsgi/Makefile.inc
+include varnish/Makefile.inc
+include w1sensor/Makefile.inc
+include web_log/Makefile.inc
+
pythonmodulesdir=$(pythondir)/python_modules
dist_pythonmodules_DATA = \
python_modules/__init__.py \
- python_modules/base.py \
$(NULL)
basesdir=$(pythonmodulesdir)/bases
@@ -95,6 +132,9 @@ dist_third_party_DATA = \
python_modules/third_party/__init__.py \
python_modules/third_party/ordereddict.py \
python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
$(NULL)
pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
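
Each per-module Makefile.inc included above is a 13-line stub (the diffstat lists 13 lines for every one of them). A representative sketch follows, using apache as the example module; the variable names are the ones the parent Makefile.am above appends to, but the exact contents are inferred from that parent file and from the headers in the generated Makefile.in below, not quoted from the .inc files themselves:

# SPDX-License-Identifier: GPL-3.0-or-later

# THIS IS NOT A COMPLETE Makefile
# IT IS INCLUDED BY ITS PARENT'S Makefile.am
# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT

# install these files
dist_python_DATA       += apache/apache.chart.py
dist_pythonconfig_DATA += apache/apache.conf

# do not install these files, but include them in the distribution
dist_noinst_DATA       += apache/README.md apache/Makefile.inc

Automake pastes each included fragment verbatim into the generated Makefile.in, which is why the same header comment resurfaces at the top of the new Makefile.in below.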
diff --git a/collectors/python.d.plugin/Makefile.in b/collectors/python.d.plugin/Makefile.in
new file mode 100644
index 000000000..ca2743d58
--- /dev/null
+++ b/collectors/python.d.plugin/Makefile.in
@@ -0,0 +1,1987 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(top_srcdir)/build/subst.inc \
+ $(srcdir)/adaptec_raid/Makefile.inc \
+ $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc \
+ $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc \
+ $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc \
+ $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc \
+ $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc \
+ $(srcdir)/dns_query_time/Makefile.inc \
+ $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc \
+ $(srcdir)/elasticsearch/Makefile.inc \
+ $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc \
+ $(srcdir)/fail2ban/Makefile.inc \
+ $(srcdir)/freeradius/Makefile.inc \
+ $(srcdir)/go_expvar/Makefile.inc \
+ $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc \
+ $(srcdir)/httpcheck/Makefile.inc \
+ $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc \
+ $(srcdir)/isc_dhcpd/Makefile.inc \
+ $(srcdir)/linux_power_supply/Makefile.inc \
+ $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc \
+ $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc \
+ $(srcdir)/memcached/Makefile.inc \
+ $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc \
+ $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc \
+ $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc \
+ $(srcdir)/ntpd/Makefile.inc \
+ $(srcdir)/ovpn_status_log/Makefile.inc \
+ $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc \
+ $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc \
+ $(srcdir)/powerdns/Makefile.inc \
+ $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc \
+ $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc \
+ $(srcdir)/rethinkdbs/Makefile.inc \
+ $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc \
+ $(srcdir)/sensors/Makefile.inc \
+ $(srcdir)/smartd_log/Makefile.inc \
+ $(srcdir)/spigotmc/Makefile.inc \
+ $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc \
+ $(srcdir)/tomcat/Makefile.inc $(srcdir)/traefik/Makefile.inc \
+ $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc \
+ $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc \
+ $(srcdir)/web_log/Makefile.inc $(srcdir)/Makefile.in \
+ $(srcdir)/Makefile.am $(dist_plugins_SCRIPTS) \
+ $(dist_python_SCRIPTS) $(dist_bases_DATA) \
+ $(dist_bases_framework_services_DATA) $(dist_libconfig_DATA) \
+ $(dist_noinst_DATA) $(dist_python_DATA) \
+ $(dist_python_urllib3_DATA) \
+ $(dist_python_urllib3_backports_DATA) \
+ $(dist_python_urllib3_contrib_DATA) \
+ $(dist_python_urllib3_packages_DATA) \
+ $(dist_python_urllib3_securetransport_DATA) \
+ $(dist_python_urllib3_ssl_match_hostname_DATA) \
+ $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
+ $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
+ $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
+ $(dist_userpythonconfig_DATA)
+subdir = collectors/python.d.plugin
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/build/m4/ax_c___atomic.m4 \
+ $(top_srcdir)/build/m4/ax_c__generic.m4 \
+ $(top_srcdir)/build/m4/ax_c_lto.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallinfo.m4 \
+ $(top_srcdir)/build/m4/ax_c_mallopt.m4 \
+ $(top_srcdir)/build/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/build/m4/ax_gcc_func_attribute.m4 \
+ $(top_srcdir)/build/m4/ax_pthread.m4 \
+ $(top_srcdir)/build/m4/jemalloc.m4 \
+ $(top_srcdir)/build/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" \
+ "$(DESTDIR)$(basesdir)" \
+ "$(DESTDIR)$(bases_framework_servicesdir)" \
+ "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" \
+ "$(DESTDIR)$(python_urllib3dir)" \
+ "$(DESTDIR)$(python_urllib3_backportsdir)" \
+ "$(DESTDIR)$(python_urllib3_contribdir)" \
+ "$(DESTDIR)$(python_urllib3_packagesdir)" \
+ "$(DESTDIR)$(python_urllib3_securetransportdir)" \
+ "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" \
+ "$(DESTDIR)$(python_urllib3_utildir)" \
+ "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" \
+ "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" \
+ "$(DESTDIR)$(third_partydir)" \
+ "$(DESTDIR)$(userpythonconfigdir)"
+SCRIPTS = $(dist_plugins_SCRIPTS) $(dist_python_SCRIPTS)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+DATA = $(dist_bases_DATA) $(dist_bases_framework_services_DATA) \
+ $(dist_libconfig_DATA) $(dist_noinst_DATA) $(dist_python_DATA) \
+ $(dist_python_urllib3_DATA) \
+ $(dist_python_urllib3_backports_DATA) \
+ $(dist_python_urllib3_contrib_DATA) \
+ $(dist_python_urllib3_packages_DATA) \
+ $(dist_python_urllib3_securetransport_DATA) \
+ $(dist_python_urllib3_ssl_match_hostname_DATA) \
+ $(dist_python_urllib3_util_DATA) $(dist_pythonconfig_DATA) \
+ $(dist_pythonmodules_DATA) $(dist_pythonyaml2_DATA) \
+ $(dist_pythonyaml3_DATA) $(dist_third_party_DATA) \
+ $(dist_userpythonconfig_DATA)
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+IPMIMONITORING_CFLAGS = @IPMIMONITORING_CFLAGS@
+IPMIMONITORING_LIBS = @IPMIMONITORING_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCAP_CFLAGS = @LIBCAP_CFLAGS@
+LIBCAP_LIBS = @LIBCAP_LIBS@
+LIBMNL_CFLAGS = @LIBMNL_CFLAGS@
+LIBMNL_LIBS = @LIBMNL_LIBS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MATH_CFLAGS = @MATH_CFLAGS@
+MATH_LIBS = @MATH_LIBS@
+MKDIR_P = @MKDIR_P@
+NFACCT_CFLAGS = @NFACCT_CFLAGS@
+NFACCT_LIBS = @NFACCT_LIBS@
+OBJEXT = @OBJEXT@
+OPTIONAL_IPMIMONITORING_CFLAGS = @OPTIONAL_IPMIMONITORING_CFLAGS@
+OPTIONAL_IPMIMONITORING_LIBS = @OPTIONAL_IPMIMONITORING_LIBS@
+OPTIONAL_LIBCAP_CFLAGS = @OPTIONAL_LIBCAP_CFLAGS@
+OPTIONAL_LIBCAP_LIBS = @OPTIONAL_LIBCAP_LIBS@
+OPTIONAL_MATH_CLFAGS = @OPTIONAL_MATH_CLFAGS@
+OPTIONAL_MATH_LIBS = @OPTIONAL_MATH_LIBS@
+OPTIONAL_NFACCT_CLFAGS = @OPTIONAL_NFACCT_CLFAGS@
+OPTIONAL_NFACCT_LIBS = @OPTIONAL_NFACCT_LIBS@
+OPTIONAL_UUID_CLFAGS = @OPTIONAL_UUID_CLFAGS@
+OPTIONAL_UUID_LIBS = @OPTIONAL_UUID_LIBS@
+OPTIONAL_ZLIB_CLFAGS = @OPTIONAL_ZLIB_CLFAGS@
+OPTIONAL_ZLIB_LIBS = @OPTIONAL_ZLIB_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_RPM_RELEASE = @PACKAGE_RPM_RELEASE@
+PACKAGE_RPM_VERSION = @PACKAGE_RPM_VERSION@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SSE_CANDIDATE = @SSE_CANDIDATE@
+STRIP = @STRIP@
+UUID_CFLAGS = @UUID_CFLAGS@
+UUID_LIBS = @UUID_LIBS@
+VERSION = @VERSION@
+ZLIB_CFLAGS = @ZLIB_CFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_target = @build_target@
+build_vendor = @build_vendor@
+builddir = @builddir@
+cachedir = @cachedir@
+chartsdir = @chartsdir@
+configdir = @configdir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+has_jemalloc = @has_jemalloc@
+has_tcmalloc = @has_tcmalloc@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libconfigdir = @libconfigdir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+logdir = @logdir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+nodedir = @nodedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+pluginsdir = @pluginsdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pythondir = @pythondir@
+registrydir = @registrydir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+varlibdir = @varlibdir@
+webdir = @webdir@
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ python.d.plugin \
+ $(NULL)
+
+SUFFIXES = .in
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA = python.d.plugin.in README.md $(NULL) \
+ adaptec_raid/README.md adaptec_raid/Makefile.inc \
+ apache/README.md apache/Makefile.inc beanstalk/README.md \
+ beanstalk/Makefile.inc bind_rndc/README.md \
+ bind_rndc/Makefile.inc boinc/README.md boinc/Makefile.inc \
+ ceph/README.md ceph/Makefile.inc chrony/README.md \
+ chrony/Makefile.inc couchdb/README.md couchdb/Makefile.inc \
+ cpufreq/README.md cpufreq/Makefile.inc cpuidle/README.md \
+ cpuidle/Makefile.inc dnsdist/README.md dnsdist/Makefile.inc \
+ dns_query_time/README.md dns_query_time/Makefile.inc \
+ dockerd/README.md dockerd/Makefile.inc dovecot/README.md \
+ dovecot/Makefile.inc elasticsearch/README.md \
+ elasticsearch/Makefile.inc example/README.md \
+ example/Makefile.inc exim/README.md exim/Makefile.inc \
+ fail2ban/README.md fail2ban/Makefile.inc freeradius/README.md \
+ freeradius/Makefile.inc go_expvar/README.md \
+ go_expvar/Makefile.inc haproxy/README.md haproxy/Makefile.inc \
+ hddtemp/README.md hddtemp/Makefile.inc httpcheck/README.md \
+ httpcheck/Makefile.inc icecast/README.md icecast/Makefile.inc \
+ ipfs/README.md ipfs/Makefile.inc isc_dhcpd/README.md \
+ isc_dhcpd/Makefile.inc linux_power_supply/README.md \
+ linux_power_supply/Makefile.inc litespeed/README.md \
+ litespeed/Makefile.inc logind/README.md logind/Makefile.inc \
+ mdstat/README.md mdstat/Makefile.inc megacli/README.md \
+ megacli/Makefile.inc memcached/README.md \
+ memcached/Makefile.inc mongodb/README.md mongodb/Makefile.inc \
+ monit/README.md monit/Makefile.inc mysql/README.md \
+ mysql/Makefile.inc nginx/README.md nginx/Makefile.inc \
+ nginx_plus/README.md nginx_plus/Makefile.inc nsd/README.md \
+ nsd/Makefile.inc ntpd/README.md ntpd/Makefile.inc \
+ ovpn_status_log/README.md ovpn_status_log/Makefile.inc \
+ phpfpm/README.md phpfpm/Makefile.inc portcheck/README.md \
+ portcheck/Makefile.inc postfix/README.md postfix/Makefile.inc \
+ postgres/README.md postgres/Makefile.inc powerdns/README.md \
+ powerdns/Makefile.inc proxysql/README.md proxysql/Makefile.inc \
+ puppet/README.md puppet/Makefile.inc rabbitmq/README.md \
+ rabbitmq/Makefile.inc redis/README.md redis/Makefile.inc \
+ rethinkdbs/README.md rethinkdbs/Makefile.inc \
+ retroshare/README.md retroshare/Makefile.inc samba/README.md \
+ samba/Makefile.inc sensors/README.md sensors/Makefile.inc \
+ smartd_log/README.md smartd_log/Makefile.inc \
+ spigotmc/README.md spigotmc/Makefile.inc springboot/README.md \
+ springboot/Makefile.inc squid/README.md squid/Makefile.inc \
+ tomcat/README.md tomcat/Makefile.inc traefik/README.md \
+ traefik/Makefile.inc unbound/README.md unbound/Makefile.inc \
+ uwsgi/README.md uwsgi/Makefile.inc varnish/README.md \
+ varnish/Makefile.inc w1sensor/README.md w1sensor/Makefile.inc \
+ web_log/README.md web_log/Makefile.inc
+dist_python_SCRIPTS = \
+ $(NULL)
+
+
+# install these files
+dist_python_DATA = $(NULL) adaptec_raid/adaptec_raid.chart.py \
+ apache/apache.chart.py beanstalk/beanstalk.chart.py \
+ bind_rndc/bind_rndc.chart.py boinc/boinc.chart.py \
+ ceph/ceph.chart.py chrony/chrony.chart.py \
+ couchdb/couchdb.chart.py cpufreq/cpufreq.chart.py \
+ cpuidle/cpuidle.chart.py dnsdist/dnsdist.chart.py \
+ dns_query_time/dns_query_time.chart.py \
+ dockerd/dockerd.chart.py dovecot/dovecot.chart.py \
+ elasticsearch/elasticsearch.chart.py example/example.chart.py \
+ exim/exim.chart.py fail2ban/fail2ban.chart.py \
+ freeradius/freeradius.chart.py go_expvar/go_expvar.chart.py \
+ haproxy/haproxy.chart.py hddtemp/hddtemp.chart.py \
+ httpcheck/httpcheck.chart.py icecast/icecast.chart.py \
+ ipfs/ipfs.chart.py isc_dhcpd/isc_dhcpd.chart.py \
+ linux_power_supply/linux_power_supply.chart.py \
+ litespeed/litespeed.chart.py logind/logind.chart.py \
+ mdstat/mdstat.chart.py megacli/megacli.chart.py \
+ memcached/memcached.chart.py mongodb/mongodb.chart.py \
+ monit/monit.chart.py mysql/mysql.chart.py nginx/nginx.chart.py \
+ nginx_plus/nginx_plus.chart.py nsd/nsd.chart.py \
+ ntpd/ntpd.chart.py ovpn_status_log/ovpn_status_log.chart.py \
+ phpfpm/phpfpm.chart.py portcheck/portcheck.chart.py \
+ postfix/postfix.chart.py postgres/postgres.chart.py \
+ powerdns/powerdns.chart.py proxysql/proxysql.chart.py \
+ puppet/puppet.chart.py rabbitmq/rabbitmq.chart.py \
+ redis/redis.chart.py rethinkdbs/rethinkdbs.chart.py \
+ retroshare/retroshare.chart.py samba/samba.chart.py \
+ sensors/sensors.chart.py smartd_log/smartd_log.chart.py \
+ spigotmc/spigotmc.chart.py springboot/springboot.chart.py \
+ squid/squid.chart.py tomcat/tomcat.chart.py \
+ traefik/traefik.chart.py unbound/unbound.chart.py \
+ uwsgi/uwsgi.chart.py varnish/varnish.chart.py \
+ w1sensor/w1sensor.chart.py web_log/web_log.chart.py
+userpythonconfigdir = $(configdir)/python.d
+dist_userpythonconfig_DATA = \
+ $(top_srcdir)/installer/.keep \
+ $(NULL)
+
+pythonconfigdir = $(libconfigdir)/python.d
+dist_pythonconfig_DATA = $(top_srcdir)/installer/.keep $(NULL) \
+ adaptec_raid/adaptec_raid.conf apache/apache.conf \
+ beanstalk/beanstalk.conf bind_rndc/bind_rndc.conf \
+ boinc/boinc.conf ceph/ceph.conf chrony/chrony.conf \
+ couchdb/couchdb.conf cpufreq/cpufreq.conf cpuidle/cpuidle.conf \
+ dnsdist/dnsdist.conf dns_query_time/dns_query_time.conf \
+ dockerd/dockerd.conf dovecot/dovecot.conf \
+ elasticsearch/elasticsearch.conf example/example.conf \
+ exim/exim.conf fail2ban/fail2ban.conf \
+ freeradius/freeradius.conf go_expvar/go_expvar.conf \
+ haproxy/haproxy.conf hddtemp/hddtemp.conf \
+ httpcheck/httpcheck.conf icecast/icecast.conf ipfs/ipfs.conf \
+ isc_dhcpd/isc_dhcpd.conf \
+ linux_power_supply/linux_power_supply.conf \
+ litespeed/litespeed.conf logind/logind.conf mdstat/mdstat.conf \
+ megacli/megacli.conf memcached/memcached.conf \
+ mongodb/mongodb.conf monit/monit.conf mysql/mysql.conf \
+ nginx/nginx.conf nginx_plus/nginx_plus.conf nsd/nsd.conf \
+ ntpd/ntpd.conf ovpn_status_log/ovpn_status_log.conf \
+ phpfpm/phpfpm.conf portcheck/portcheck.conf \
+ postfix/postfix.conf postgres/postgres.conf \
+ powerdns/powerdns.conf proxysql/proxysql.conf \
+ puppet/puppet.conf rabbitmq/rabbitmq.conf redis/redis.conf \
+ rethinkdbs/rethinkdbs.conf retroshare/retroshare.conf \
+ samba/samba.conf sensors/sensors.conf \
+ smartd_log/smartd_log.conf spigotmc/spigotmc.conf \
+ springboot/springboot.conf squid/squid.conf tomcat/tomcat.conf \
+ traefik/traefik.conf unbound/unbound.conf uwsgi/uwsgi.conf \
+ varnish/varnish.conf w1sensor/w1sensor.conf \
+ web_log/web_log.conf
+pythonmodulesdir = $(pythondir)/python_modules
+dist_pythonmodules_DATA = \
+ python_modules/__init__.py \
+ $(NULL)
+
+basesdir = $(pythonmodulesdir)/bases
+dist_bases_DATA = \
+ python_modules/bases/__init__.py \
+ python_modules/bases/charts.py \
+ python_modules/bases/collection.py \
+ python_modules/bases/loaders.py \
+ python_modules/bases/loggers.py \
+ $(NULL)
+
+bases_framework_servicesdir = $(basesdir)/FrameworkServices
+dist_bases_framework_services_DATA = \
+ python_modules/bases/FrameworkServices/__init__.py \
+ python_modules/bases/FrameworkServices/ExecutableService.py \
+ python_modules/bases/FrameworkServices/LogService.py \
+ python_modules/bases/FrameworkServices/MySQLService.py \
+ python_modules/bases/FrameworkServices/SimpleService.py \
+ python_modules/bases/FrameworkServices/SocketService.py \
+ python_modules/bases/FrameworkServices/UrlService.py \
+ $(NULL)
+
+third_partydir = $(pythonmodulesdir)/third_party
+dist_third_party_DATA = \
+ python_modules/third_party/__init__.py \
+ python_modules/third_party/ordereddict.py \
+ python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
+ $(NULL)
+
+pythonyaml2dir = $(pythonmodulesdir)/pyyaml2
+dist_pythonyaml2_DATA = \
+ python_modules/pyyaml2/__init__.py \
+ python_modules/pyyaml2/composer.py \
+ python_modules/pyyaml2/constructor.py \
+ python_modules/pyyaml2/cyaml.py \
+ python_modules/pyyaml2/dumper.py \
+ python_modules/pyyaml2/emitter.py \
+ python_modules/pyyaml2/error.py \
+ python_modules/pyyaml2/events.py \
+ python_modules/pyyaml2/loader.py \
+ python_modules/pyyaml2/nodes.py \
+ python_modules/pyyaml2/parser.py \
+ python_modules/pyyaml2/reader.py \
+ python_modules/pyyaml2/representer.py \
+ python_modules/pyyaml2/resolver.py \
+ python_modules/pyyaml2/scanner.py \
+ python_modules/pyyaml2/serializer.py \
+ python_modules/pyyaml2/tokens.py \
+ $(NULL)
+
+pythonyaml3dir = $(pythonmodulesdir)/pyyaml3
+dist_pythonyaml3_DATA = \
+ python_modules/pyyaml3/__init__.py \
+ python_modules/pyyaml3/composer.py \
+ python_modules/pyyaml3/constructor.py \
+ python_modules/pyyaml3/cyaml.py \
+ python_modules/pyyaml3/dumper.py \
+ python_modules/pyyaml3/emitter.py \
+ python_modules/pyyaml3/error.py \
+ python_modules/pyyaml3/events.py \
+ python_modules/pyyaml3/loader.py \
+ python_modules/pyyaml3/nodes.py \
+ python_modules/pyyaml3/parser.py \
+ python_modules/pyyaml3/reader.py \
+ python_modules/pyyaml3/representer.py \
+ python_modules/pyyaml3/resolver.py \
+ python_modules/pyyaml3/scanner.py \
+ python_modules/pyyaml3/serializer.py \
+ python_modules/pyyaml3/tokens.py \
+ $(NULL)
+
+python_urllib3dir = $(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir = $(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir = $(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir = $(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir = $(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir = $(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir = $(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .in
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu collectors/python.d.plugin/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+$(top_srcdir)/build/subst.inc $(srcdir)/adaptec_raid/Makefile.inc $(srcdir)/apache/Makefile.inc $(srcdir)/beanstalk/Makefile.inc $(srcdir)/bind_rndc/Makefile.inc $(srcdir)/boinc/Makefile.inc $(srcdir)/ceph/Makefile.inc $(srcdir)/chrony/Makefile.inc $(srcdir)/couchdb/Makefile.inc $(srcdir)/cpufreq/Makefile.inc $(srcdir)/cpuidle/Makefile.inc $(srcdir)/dnsdist/Makefile.inc $(srcdir)/dns_query_time/Makefile.inc $(srcdir)/dockerd/Makefile.inc $(srcdir)/dovecot/Makefile.inc $(srcdir)/elasticsearch/Makefile.inc $(srcdir)/example/Makefile.inc $(srcdir)/exim/Makefile.inc $(srcdir)/fail2ban/Makefile.inc $(srcdir)/freeradius/Makefile.inc $(srcdir)/go_expvar/Makefile.inc $(srcdir)/haproxy/Makefile.inc $(srcdir)/hddtemp/Makefile.inc $(srcdir)/httpcheck/Makefile.inc $(srcdir)/icecast/Makefile.inc $(srcdir)/ipfs/Makefile.inc $(srcdir)/isc_dhcpd/Makefile.inc $(srcdir)/linux_power_supply/Makefile.inc $(srcdir)/litespeed/Makefile.inc $(srcdir)/logind/Makefile.inc $(srcdir)/mdstat/Makefile.inc $(srcdir)/megacli/Makefile.inc $(srcdir)/memcached/Makefile.inc $(srcdir)/mongodb/Makefile.inc $(srcdir)/monit/Makefile.inc $(srcdir)/mysql/Makefile.inc $(srcdir)/nginx/Makefile.inc $(srcdir)/nginx_plus/Makefile.inc $(srcdir)/nsd/Makefile.inc $(srcdir)/ntpd/Makefile.inc $(srcdir)/ovpn_status_log/Makefile.inc $(srcdir)/phpfpm/Makefile.inc $(srcdir)/portcheck/Makefile.inc $(srcdir)/postfix/Makefile.inc $(srcdir)/postgres/Makefile.inc $(srcdir)/powerdns/Makefile.inc $(srcdir)/proxysql/Makefile.inc $(srcdir)/puppet/Makefile.inc $(srcdir)/rabbitmq/Makefile.inc $(srcdir)/redis/Makefile.inc $(srcdir)/rethinkdbs/Makefile.inc $(srcdir)/retroshare/Makefile.inc $(srcdir)/samba/Makefile.inc $(srcdir)/sensors/Makefile.inc $(srcdir)/smartd_log/Makefile.inc $(srcdir)/spigotmc/Makefile.inc $(srcdir)/springboot/Makefile.inc $(srcdir)/squid/Makefile.inc $(srcdir)/tomcat/Makefile.inc $(srcdir)/traefik/Makefile.inc $(srcdir)/unbound/Makefile.inc $(srcdir)/uwsgi/Makefile.inc $(srcdir)/varnish/Makefile.inc $(srcdir)/w1sensor/Makefile.inc $(srcdir)/web_log/Makefile.inc:
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-dist_pluginsSCRIPTS: $(dist_plugins_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pluginsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pluginsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pluginsdir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pluginsdir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pluginsSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_plugins_SCRIPTS)'; test -n "$(pluginsdir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pluginsdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonSCRIPTS: $(dist_python_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pythondir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pythondir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-dist_pythonSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_SCRIPTS)'; test -n "$(pythondir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
+install-dist_basesDATA: $(dist_bases_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(basesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(basesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(basesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(basesdir)" || exit $$?; \
+ done
+
+uninstall-dist_basesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_bases_DATA)'; test -n "$(basesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(basesdir)'; $(am__uninstall_files_from_dir)
+install-dist_bases_framework_servicesDATA: $(dist_bases_framework_services_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(bases_framework_servicesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(bases_framework_servicesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bases_framework_servicesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(bases_framework_servicesdir)" || exit $$?; \
+ done
+
+uninstall-dist_bases_framework_servicesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_bases_framework_services_DATA)'; test -n "$(bases_framework_servicesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(bases_framework_servicesdir)'; $(am__uninstall_files_from_dir)
+install-dist_libconfigDATA: $(dist_libconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(libconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(libconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(libconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(libconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_libconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_libconfig_DATA)'; test -n "$(libconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(libconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonDATA: $(dist_python_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythondir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythondir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythondir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythondir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_DATA)'; test -n "$(pythondir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythondir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3DATA: $(dist_python_urllib3_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3dir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_DATA)'; test -n "$(python_urllib3dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3dir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_backportsDATA: $(dist_python_urllib3_backports_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_backportsdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_backportsdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_backportsdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_backportsDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_backports_DATA)'; test -n "$(python_urllib3_backportsdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_backportsdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_contribDATA: $(dist_python_urllib3_contrib_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_contribdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_contribdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_contribdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_contribdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_contribDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_contrib_DATA)'; test -n "$(python_urllib3_contribdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_contribdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_packagesDATA: $(dist_python_urllib3_packages_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_packagesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_packagesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_packagesdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_packagesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_packages_DATA)'; test -n "$(python_urllib3_packagesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_packagesdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_securetransportDATA: $(dist_python_urllib3_securetransport_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_securetransportdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_securetransportdir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_securetransportDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_securetransport_DATA)'; test -n "$(python_urllib3_securetransportdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_securetransportdir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_ssl_match_hostnameDATA: $(dist_python_urllib3_ssl_match_hostname_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_ssl_match_hostnameDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_ssl_match_hostname_DATA)'; test -n "$(python_urllib3_ssl_match_hostnamedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)'; $(am__uninstall_files_from_dir)
+install-dist_python_urllib3_utilDATA: $(dist_python_urllib3_util_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(python_urllib3_utildir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(python_urllib3_utildir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(python_urllib3_utildir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(python_urllib3_utildir)" || exit $$?; \
+ done
+
+uninstall-dist_python_urllib3_utilDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_python_urllib3_util_DATA)'; test -n "$(python_urllib3_utildir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(python_urllib3_utildir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonconfigDATA: $(dist_pythonconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonconfig_DATA)'; test -n "$(pythonconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonconfigdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonmodulesDATA: $(dist_pythonmodules_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonmodulesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonmodulesdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonmodulesdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonmodulesdir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonmodulesDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonmodules_DATA)'; test -n "$(pythonmodulesdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonmodulesdir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonyaml2DATA: $(dist_pythonyaml2_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml2dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml2dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml2dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml2dir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonyaml2DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonyaml2_DATA)'; test -n "$(pythonyaml2dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonyaml2dir)'; $(am__uninstall_files_from_dir)
+install-dist_pythonyaml3DATA: $(dist_pythonyaml3_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pythonyaml3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pythonyaml3dir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pythonyaml3dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pythonyaml3dir)" || exit $$?; \
+ done
+
+uninstall-dist_pythonyaml3DATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_pythonyaml3_DATA)'; test -n "$(pythonyaml3dir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(pythonyaml3dir)'; $(am__uninstall_files_from_dir)
+install-dist_third_partyDATA: $(dist_third_party_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(third_partydir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(third_partydir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(third_partydir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(third_partydir)" || exit $$?; \
+ done
+
+uninstall-dist_third_partyDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_third_party_DATA)'; test -n "$(third_partydir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(third_partydir)'; $(am__uninstall_files_from_dir)
+install-dist_userpythonconfigDATA: $(dist_userpythonconfig_DATA)
+ @$(NORMAL_INSTALL)
+ @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(userpythonconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(userpythonconfigdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(userpythonconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(userpythonconfigdir)" || exit $$?; \
+ done
+
+uninstall-dist_userpythonconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_userpythonconfig_DATA)'; test -n "$(userpythonconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(userpythonconfigdir)'; $(am__uninstall_files_from_dir)
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(DATA)
+installdirs:
+ for dir in "$(DESTDIR)$(pluginsdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(basesdir)" "$(DESTDIR)$(bases_framework_servicesdir)" "$(DESTDIR)$(libconfigdir)" "$(DESTDIR)$(pythondir)" "$(DESTDIR)$(python_urllib3dir)" "$(DESTDIR)$(python_urllib3_backportsdir)" "$(DESTDIR)$(python_urllib3_contribdir)" "$(DESTDIR)$(python_urllib3_packagesdir)" "$(DESTDIR)$(python_urllib3_securetransportdir)" "$(DESTDIR)$(python_urllib3_ssl_match_hostnamedir)" "$(DESTDIR)$(python_urllib3_utildir)" "$(DESTDIR)$(pythonconfigdir)" "$(DESTDIR)$(pythonmodulesdir)" "$(DESTDIR)$(pythonyaml2dir)" "$(DESTDIR)$(pythonyaml3dir)" "$(DESTDIR)$(third_partydir)" "$(DESTDIR)$(userpythonconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES)
+clean: clean-am
+
+clean-am: clean-generic mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_basesDATA \
+ install-dist_bases_framework_servicesDATA \
+ install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
+ install-dist_pythonDATA install-dist_pythonSCRIPTS \
+ install-dist_python_urllib3DATA \
+ install-dist_python_urllib3_backportsDATA \
+ install-dist_python_urllib3_contribDATA \
+ install-dist_python_urllib3_packagesDATA \
+ install-dist_python_urllib3_securetransportDATA \
+ install-dist_python_urllib3_ssl_match_hostnameDATA \
+ install-dist_python_urllib3_utilDATA \
+ install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
+ install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
+ install-dist_third_partyDATA install-dist_userpythonconfigDATA
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_basesDATA \
+ uninstall-dist_bases_framework_servicesDATA \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+ uninstall-dist_python_urllib3DATA \
+ uninstall-dist_python_urllib3_backportsDATA \
+ uninstall-dist_python_urllib3_contribDATA \
+ uninstall-dist_python_urllib3_packagesDATA \
+ uninstall-dist_python_urllib3_securetransportDATA \
+ uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
+ uninstall-dist_python_urllib3_utilDATA \
+ uninstall-dist_pythonconfigDATA \
+ uninstall-dist_pythonmodulesDATA \
+ uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
+ uninstall-dist_third_partyDATA \
+ uninstall-dist_userpythonconfigDATA
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
+ ctags-am distclean distclean-generic distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dist_basesDATA \
+ install-dist_bases_framework_servicesDATA \
+ install-dist_libconfigDATA install-dist_pluginsSCRIPTS \
+ install-dist_pythonDATA install-dist_pythonSCRIPTS \
+ install-dist_python_urllib3DATA \
+ install-dist_python_urllib3_backportsDATA \
+ install-dist_python_urllib3_contribDATA \
+ install-dist_python_urllib3_packagesDATA \
+ install-dist_python_urllib3_securetransportDATA \
+ install-dist_python_urllib3_ssl_match_hostnameDATA \
+ install-dist_python_urllib3_utilDATA \
+ install-dist_pythonconfigDATA install-dist_pythonmodulesDATA \
+ install-dist_pythonyaml2DATA install-dist_pythonyaml3DATA \
+ install-dist_third_partyDATA install-dist_userpythonconfigDATA \
+ install-dvi install-dvi-am install-exec install-exec-am \
+ install-html install-html-am install-info install-info-am \
+ install-man install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags-am \
+ uninstall uninstall-am uninstall-dist_basesDATA \
+ uninstall-dist_bases_framework_servicesDATA \
+ uninstall-dist_libconfigDATA uninstall-dist_pluginsSCRIPTS \
+ uninstall-dist_pythonDATA uninstall-dist_pythonSCRIPTS \
+ uninstall-dist_python_urllib3DATA \
+ uninstall-dist_python_urllib3_backportsDATA \
+ uninstall-dist_python_urllib3_contribDATA \
+ uninstall-dist_python_urllib3_packagesDATA \
+ uninstall-dist_python_urllib3_securetransportDATA \
+ uninstall-dist_python_urllib3_ssl_match_hostnameDATA \
+ uninstall-dist_python_urllib3_utilDATA \
+ uninstall-dist_pythonconfigDATA \
+ uninstall-dist_pythonmodulesDATA \
+ uninstall-dist_pythonyaml2DATA uninstall-dist_pythonyaml3DATA \
+ uninstall-dist_third_partyDATA \
+ uninstall-dist_userpythonconfigDATA
+
+.in:
+ if sed \
+ -e 's#[@]localstatedir_POST@#$(localstatedir)#g' \
+ -e 's#[@]sbindir_POST@#$(sbindir)#g' \
+ -e 's#[@]sysconfdir_POST@#$(sysconfdir)#g' \
+ -e 's#[@]pythondir_POST@#$(pythondir)#g' \
+ -e 's#[@]configdir_POST@#$(configdir)#g' \
+ -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
+ -e 's#[@]cachedir_POST@#$(cachedir)#g' \
+ $< > $@.tmp; then \
+ mv "$@.tmp" "$@"; \
+ else \
+ rm -f "$@.tmp"; \
+ false; \
+ fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
new file mode 100644
index 000000000..df24cd18f
--- /dev/null
+++ b/collectors/python.d.plugin/README.md
@@ -0,0 +1,198 @@
+# python.d.plugin
+
+`python.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. It supports any number of data collection **modules**
+5. It allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
+
+
+## Disclaimer
+
+Every module should be compatible with python2 and python3.
+All third party libraries should be installed system-wide or in the `python_modules` directory.
+Module configurations are written in YAML and **pyYAML is required**.
+
+Every configuration file must have one of two formats:
+
+- Configuration for only one job:
+
+```yaml
+update_every : 2 # update frequency
+retries : 1 # how many failures in update() are tolerated
+priority : 20000 # where it is shown on dashboard
+
+other_var1 : bla # variables passed to module
+other_var2 : alb
+```
+
+- Configuration for many jobs (e.g. mysql):
+
+```yaml
+# module defaults:
+update_every : 2
+retries : 1
+priority : 20000
+
+local: # job name
+ update_every : 5 # job update frequency
+ other_var1 : some_val # module specific variable
+
+other_job:
+ priority : 5 # job position on dashboard
+ retries : 20 # job retries
+ other_var2 : val # module specific variable
+```
+
+`update_every`, `retries`, and `priority` are always optional.
+
+---
+
+## How to write a new module
+
+Writing a new python module is simple. You just need to remember to include 5 major things:
+- **ORDER** global list
+- **CHARTS** global dictionary
+- **Service** class
+- **_get_data** method
+- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
+
+If you plan to submit the module in a PR, make sure to go through the [PR checklist for new modules](https://github.com/netdata/netdata/wiki/New-Module-PR-Checklist) beforehand, so that you have updated all the files you need to.
+
+### Global variables `ORDER` and `CHARTS`
+
+The `ORDER` list should contain the chart ids in the order they are displayed. Example:
+```py
+ORDER = ['first_chart', 'second_chart', 'third_chart']
+```
+
+The `CHARTS` dictionary is a little bit trickier. It should contain the chart definitions in the following format:
+```py
+CHARTS = {
+    id: {
+        'options': [name, title, units, family, context, charttype],
+        'lines': [
+            [unique_dimension_name, name, algorithm, multiplier, divisor]
+        ]}
+}
+```
+
+All names are better explained in the [External Plugins](../) section.
+Parameters like `priority` and `update_every` are handled by `python.d.plugin`.
+
+### `Service` class
+
+Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes:
+
+- `SimpleService`
+- `UrlService`
+- `SocketService`
+- `LogService`
+- `ExecutableService`
+
+It also needs to invoke the parent class constructor in a specific way, and assign the global variables to class variables.
+
+Simple example:
+```py
+from bases.FrameworkServices.UrlService import UrlService
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+```
+
+### `_get_data` collector/parser
+
+This method should grab raw data from `_get_raw_data`, parse it, and return a dictionary whose keys are unique dimension names, or `None` if no data is collected.
+
+Example:
+```py
+def _get_data(self):
+ try:
+ raw = self._get_raw_data().split(" ")
+ return {'active': int(raw[2])}
+ except (ValueError, AttributeError):
+ return None
+```
+
+## More about framework classes
+
+Every framework class has some user-configurable variables which are specific to that particular class. Those variables should have default values initialized in the child class constructor.
+
+If a module needs an additional user-configurable variable, it can be read from the `self.configuration` dictionary and assigned in the constructor or in a custom `check` method. Example:
+```py
+def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ try:
+ self.baseurl = str(self.configuration['baseurl'])
+ except (KeyError, TypeError):
+ self.baseurl = "http://localhost:5001"
+```
+
+Classes implement `_get_raw_data` which should be used to grab raw data. This method usually returns a list of strings.
+
+### `SimpleService`
+
+_This is the last-resort class: if a new module cannot be written using any other framework class, this one can be used._
+
+_Example: `mysql`, `sensors`_
+
+It is the lowest-level class and implements most of the module logic, like:
+- threading
+- handling run times
+- chart formatting
+- logging
+- chart creation and updating
+
+### `LogService`
+
+_Examples: `apache_cache`, `nginx_log`_
+
+_Variable from config file_: `log_path`.
+
+An object created from this class reads new lines from the file specified by the `log_path` variable, after checking that the file exists and is readable. `_get_raw_data` returns a list of strings, each one a new line from the file specified in `log_path`.
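+
+As a minimal sketch (the chart, dimension, and default log path below are hypothetical, used only to illustrate the flow):
+
+```py
+from bases.FrameworkServices.LogService import LogService
+
+ORDER = ['events']
+
+CHARTS = {
+    'events': {
+        'options': [None, 'New Log Events', 'events/s', 'events', 'example.events', 'line'],
+        'lines': [
+            ['events', None, 'incremental']
+        ]}
+}
+
+
+class Service(LogService):
+    def __init__(self, configuration=None, name=None):
+        LogService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        # default log file, normally overridden by `log_path` in the job config
+        self.log_path = self.configuration.get('log_path', '/var/log/example.log')
+        self.data = {'events': 0}
+
+    def _get_data(self):
+        raw = self._get_raw_data()
+        if raw is None:
+            return None
+        # keep a running counter; with the 'incremental' algorithm
+        # netdata charts its per-second rate
+        self.data['events'] += len(raw)
+        return self.data
+```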
+
+### `ExecutableService`
+
+_Examples: `exim`, `postfix`_
+
+_Variable from config file_: `command`.
+
+This class executes a shell command in a secure way. It checks the `command` variable for invalid characters and won't proceed if it contains one of:
+- '&'
+- '|'
+- ';'
+- '>'
+- '<'
+
+For additional security it uses python's `subprocess.Popen` (without the `shell=True` option) to execute the command. The command can be specified with an absolute or relative name. When a relative name is used, it will try to find `command` in the `PATH` environment variable as well as in `/sbin` and `/usr/sbin`.
+
+`_get_raw_data` returns a list of decoded lines produced by `command`.
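+
+As a minimal sketch (the `example-stats` command and its output format are hypothetical):
+
+```py
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+ORDER = ['queue']
+
+CHARTS = {
+    'queue': {
+        'options': [None, 'Queued Items', 'items', 'queue', 'example.queue', 'line'],
+        'lines': [
+            ['queued']
+        ]}
+}
+
+
+class Service(ExecutableService):
+    def __init__(self, configuration=None, name=None):
+        ExecutableService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        # default command, normally overridden by `command` in the job config
+        self.command = 'example-stats'
+
+    def _get_data(self):
+        try:
+            raw = self._get_raw_data()
+            # assume the first output line looks like: "queued: 42"
+            return {'queued': int(raw[0].split(':')[1])}
+        except (TypeError, IndexError, ValueError):
+            return None
+```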
+
+### `UrlService`
+
+_Examples: `apache`, `nginx`, `tomcat`_
+
+_Variables from config file_: `url`, `user`, `pass`.
+
+Use this class if the data is grabbed by accessing a service via the HTTP protocol. It can handle HTTP Basic Auth when the `user` and `pass` credentials are specified.
+
+`_get_raw_data` returns a list of UTF-8 decoded strings (lines).
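+
+For example, a job definition supplying all three variables could look like this (all values are placeholders):
+
+```yaml
+local:
+  url  : 'http://localhost/status'
+  user : 'username'
+  pass : 'password'
+```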
+
+### `SocketService`
+
+_Examples: `dovecot`, `redis`_
+
+_Variables from config file_: `unix_socket`, `host`, `port`, `request`.
+
+The object will try to execute the `request` using either `unix_socket` or a TCP/IP socket built from `host` and `port`. It can access unix sockets with the SOCK_STREAM or SOCK_DGRAM protocols, and IPv4/IPv6 TCP/IP sockets with SOCK_STREAM.
+
+Sockets are accessed in non-blocking mode with a 15-second timeout.
+
+After every execution of `_get_raw_data` the socket is closed. To prevent this, the module needs to set the `_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
+
+`_check_raw_data` should take the raw data and return `True` if all data has been received, otherwise `False`. It should also do this in a fast and efficient way.
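+
+A minimal sketch, assuming a hypothetical line-based protocol that terminates every response with an `END` line:
+
+```py
+def _check_raw_data(self, data):
+    # all data has been received once the terminating line shows up
+    return data.endswith('END\r\n')
+```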
diff --git a/collectors/python.d.plugin/adaptec_raid/Makefile.inc b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
new file mode 100644
index 000000000..716cdb235
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += adaptec_raid/adaptec_raid.chart.py
+dist_pythonconfig_DATA += adaptec_raid/adaptec_raid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += adaptec_raid/README.md adaptec_raid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
new file mode 100644
index 000000000..499dc9190
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -0,0 +1,46 @@
+# adaptec raid
+
+This module collects health metrics for logical and physical devices.
+
+**Requirements:**
+* `arcconf` program
+* `sudo` program
+* the `netdata` user needs to be able to run the `arcconf` program via `sudo` without a password
+
+To grab stats it executes:
+ * `sudo -n arcconf GETCONFIG 1 LD`
+ * `sudo -n arcconf GETCONFIG 1 PD`
+
+
+It produces:
+
+1. **Logical Device Status**
+
+2. **Physical Device State**
+
+3. **Physical Device S.M.A.R.T warnings**
+
+4. **Physical Device Temperature**
+
+### prerequisite
+
+This module uses `arcconf`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured so that the `netdata` user can
+execute `arcconf` as root without a password.
+
+Add to `sudoers`:
+
+ netdata ALL=(root) NOPASSWD: /path/to/arcconf
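+
+To verify the setup, you can try the same command the module runs (assuming `arcconf` is found via `PATH`):
+
+    sudo -u netdata sudo -n arcconf GETCONFIG 1 LD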
+
+### configuration
+
+**adaptec_raid** is disabled by default. It should be explicitly enabled in `python.d.conf`:
+
+```yaml
+adaptec_raid: yes
+```
+
+#### Screenshot:
+
+![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
+
+---
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
new file mode 100644
index 000000000..1fb1e4336
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Description: adaptec_raid netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+
+disabled_by_default = True
+
+update_every = 5
+
+ORDER = [
+ 'ld_status',
+ 'pd_state',
+ 'pd_smart_warnings',
+ 'pd_temperature',
+]
+
+CHARTS = {
+ 'ld_status': {
+ 'options': [None, 'Status Is Not OK', 'bool', 'logical devices', 'adapter_raid.ld_status', 'line'],
+ 'lines': []
+ },
+ 'pd_state': {
+ 'options': [None, 'State Is Not OK', 'bool', 'physical devices', 'adapter_raid.pd_state', 'line'],
+ 'lines': []
+ },
+ 'pd_smart_warnings': {
+ 'options': [None, 'S.M.A.R.T warnings', 'count', 'physical devices',
+ 'adapter_raid.smart_warnings', 'line'],
+ 'lines': []
+ },
+ 'pd_temperature': {
+ 'options': [None, 'Temperature', 'celsius', 'physical devices', 'adapter_raid.temperature', 'line'],
+ 'lines': []
+ },
+}
+
+SUDO = 'sudo'
+ARCCONF = 'arcconf'
+
+BAD_LD_STATUS = (
+ 'Degraded',
+ 'Failed',
+)
+
+GOOD_PD_STATUS = (
+ 'Online',
+)
+
+RE_LD = re.compile(
+ r'Logical device number\s+([0-9]+).*?'
+ r'Status of logical device\s+: ([a-zA-Z]+)'
+)
+
+
+def find_lds(d):
+ d = ' '.join(v.strip() for v in d)
+ return [LD(*v) for v in RE_LD.findall(d)]
+
+
+def find_pds(d):
+ pds = list()
+ pd = PD()
+
+ for row in d:
+ row = row.strip()
+ if row.startswith('Device #'):
+ pd = PD()
+ pd.id = row.split('#')[-1]
+ elif not pd.id:
+ continue
+
+ if row.startswith('State'):
+ v = row.split()[-1]
+ pd.state = v
+ elif row.startswith('S.M.A.R.T. warnings'):
+ v = row.split()[-1]
+ pd.smart_warnings = v
+ elif row.startswith('Temperature'):
+ v = row.split(':')[-1].split()[0]
+ pd.temperature = v
+ elif row.startswith('NCQ status'):
+ if pd.id and pd.state and pd.smart_warnings:
+ pds.append(pd)
+ pd = PD()
+
+ return pds
+
+
+class LD:
+ def __init__(self, ld_id, status):
+ self.id = ld_id
+ self.status = status
+
+ def data(self):
+ return {
+ 'ld_{0}_status'.format(self.id): int(self.status in BAD_LD_STATUS)
+ }
+
+
+class PD:
+ def __init__(self):
+ self.id = None
+ self.state = None
+ self.smart_warnings = None
+ self.temperature = None
+
+ def data(self):
+ data = {
+ 'pd_{0}_state'.format(self.id): int(self.state not in GOOD_PD_STATUS),
+ 'pd_{0}_smart_warnings'.format(self.id): self.smart_warnings,
+ }
+ if self.temperature and self.temperature.isdigit():
+ data['pd_{0}_temperature'.format(self.id)] = self.temperature
+
+ return data
+
+
+class Arcconf:
+ def __init__(self, arcconf):
+ self.arcconf = arcconf
+
+ def ld_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'LD']
+
+ def pd_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'PD']
+
+
+# TODO: hardcoded sudo...
+class SudoArcconf:
+ def __init__(self, arcconf, sudo):
+ self.arcconf = Arcconf(arcconf)
+ self.sudo = sudo
+
+ def ld_info(self):
+ return [self.sudo, '-n'] + self.arcconf.ld_info()
+
+ def pd_info(self):
+ return [self.sudo, '-n'] + self.arcconf.pd_info()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.use_sudo = self.configuration.get('use_sudo', True)
+ self.arcconf = None
+
+ def execute(self, command, stderr=False):
+ return self._get_raw_data(command=command, stderr=stderr)
+
+ def check(self):
+ arcconf = find_binary(ARCCONF)
+ if not arcconf:
+ self.error('can\'t locate "{0}" binary'.format(ARCCONF))
+ return False
+
+ sudo = find_binary(SUDO)
+ if self.use_sudo:
+ if not sudo:
+ self.error('can\'t locate "{0}" binary'.format(SUDO))
+ return False
+ err = self.execute([sudo, '-n', '-v'], True)
+ if err:
+ self.error(' '.join(err))
+ return False
+
+ if self.use_sudo:
+ self.arcconf = SudoArcconf(arcconf, sudo)
+ else:
+ self.arcconf = Arcconf(arcconf)
+
+ lds = self.get_lds()
+ if not lds:
+ return False
+
+ self.debug('discovered logical devices ids: {0}'.format([ld.id for ld in lds]))
+
+ pds = self.get_pds()
+ if not pds:
+ return False
+
+ self.debug('discovered physical devices ids: {0}'.format([pd.id for pd in pds]))
+
+ self.update_charts(lds, pds)
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ for ld in self.get_lds():
+ data.update(ld.data())
+
+ for pd in self.get_pds():
+ data.update(pd.data())
+
+ return data
+
+ def get_lds(self):
+ raw_lds = self.execute(self.arcconf.ld_info())
+ if not raw_lds:
+ return None
+
+ lds = find_lds(raw_lds)
+ if not lds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.ld_info())))
+ self.debug('output: {0}'.format(raw_lds))
+ return None
+ return lds
+
+ def get_pds(self):
+ raw_pds = self.execute(self.arcconf.pd_info())
+ if not raw_pds:
+ return None
+
+ pds = find_pds(raw_pds)
+ if not pds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.pd_info())))
+ self.debug('output: {0}'.format(raw_pds))
+ return None
+ return pds
+
+ def update_charts(self, lds, pds):
+ charts = self.definitions
+ for ld in lds:
+ dim = ['ld_{0}_status'.format(ld.id), 'ld {0}'.format(ld.id)]
+ charts['ld_status']['lines'].append(dim)
+
+ for pd in pds:
+ dim = ['pd_{0}_state'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_state']['lines'].append(dim)
+
+ dim = ['pd_{0}_smart_warnings'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_smart_warnings']['lines'].append(dim)
+
+ dim = ['pd_{0}_temperature'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_temperature']['lines'].append(dim)
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
new file mode 100644
index 000000000..253cbf5a9
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
@@ -0,0 +1,55 @@
+# netdata python.d.plugin configuration for adaptec raid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/apache/Makefile.inc b/collectors/python.d.plugin/apache/Makefile.inc
new file mode 100644
index 000000000..70a421550
--- /dev/null
+++ b/collectors/python.d.plugin/apache/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += apache/apache.chart.py
+dist_pythonconfig_DATA += apache/apache.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apache/README.md apache/Makefile.inc
+
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
new file mode 100644
index 000000000..c6d1d126a
--- /dev/null
+++ b/collectors/python.d.plugin/apache/README.md
@@ -0,0 +1,59 @@
+# apache
+
+This module will monitor one or more Apache servers depending on configuration.
+
+**Requirements:**
+ * apache with `mod_status` enabled
+
+It produces the following charts:
+
+1. **Requests** in requests/s
+ * requests
+
+2. **Connections**
+ * connections
+
+3. **Async Connections**
+ * keepalive
+ * closing
+ * writing
+
+4. **Bandwidth** in kilobits/s
+ * sent
+
+5. **Workers**
+ * idle
+ * busy
+
+6. **Lifetime Avg. Requests/s** in requests/s
+ * requests_sec
+
+7. **Lifetime Avg. Bandwidth/s** in kilobits/s
+ * size_sec
+
+8. **Lifetime Avg. Response Size** in bytes/request
+ * size_req
+
+### configuration
+
+Needs only the `url` to the server's `server-status?auto` page.
+
+Here is an example for 2 servers:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/server-status?auto'
+ retries : 20
+
+remote:
+ url : 'http://www.apache.org/server-status?auto'
+ update_every : 5
+ retries : 4
+```
+
+Without configuration, the module attempts to connect to `http://localhost/server-status?auto`.
+
+---
diff --git a/python.d/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
index 789b3c099..d136274d0 100644
--- a/python.d/apache.chart.py
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: apache netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.UrlService import UrlService
@@ -25,63 +26,65 @@ CHARTS = {
'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
'statistics', 'apache.bytesperreq', 'area'],
'lines': [
- ["size_req"]
+ ['size_req']
]},
'workers': {
'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
'lines': [
- ["idle"],
- ["busy"],
+ ['idle'],
+ ['busy'],
]},
'reqpersec': {
'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
'apache.reqpersec', 'area'],
'lines': [
- ["requests_sec"]
+ ['requests_sec']
]},
'bytespersec': {
'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
'apache.bytesperreq', 'area'],
'lines': [
- ["size_sec", None, 'absolute', 8, 1000]
+ ['size_sec', None, 'absolute', 8, 1000]
]},
'requests': {
'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
'lines': [
- ["requests", None, 'incremental']
+ ['requests', None, 'incremental']
]},
'net': {
'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
'lines': [
- ["sent", None, 'incremental', 8, 1]
+ ['sent', None, 'incremental', 8, 1]
]},
'connections': {
'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
'lines': [
- ["connections"]
+ ['connections']
]},
'conns_async': {
'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
'lines': [
- ["keepalive"],
- ["closing"],
- ["writing"]
+ ['keepalive'],
+ ['closing'],
+ ['writing']
]}
}
-ASSIGNMENT = {"BytesPerReq": 'size_req',
- "IdleWorkers": 'idle',
- "IdleServers": 'idle_servers',
- "BusyWorkers": 'busy',
- "BusyServers": 'busy_servers',
- "ReqPerSec": 'requests_sec',
- "BytesPerSec": 'size_sec',
- "Total Accesses": 'requests',
- "Total kBytes": 'sent',
- "ConnsTotal": 'connections',
- "ConnsAsyncKeepAlive": 'keepalive',
- "ConnsAsyncClosing": 'closing',
- "ConnsAsyncWriting": 'writing'}
+ASSIGNMENT = {
+ 'BytesPerReq': 'size_req',
+ 'IdleWorkers': 'idle',
+ 'IdleServers': 'idle_servers',
+ 'BusyWorkers': 'busy',
+ 'BusyServers': 'busy_servers',
+ 'ReqPerSec': 'requests_sec',
+ 'BytesPerSec': 'size_sec',
+ 'Total Accesses': 'requests',
+ 'Total kBytes': 'sent',
+ 'ConnsTotal': 'connections',
+ 'ConnsAsyncKeepAlive': 'keepalive',
+ 'ConnsAsyncClosing': 'closing',
+ 'ConnsAsyncWriting': 'writing'
+}
class Service(UrlService):
@@ -102,8 +105,8 @@ class Service(UrlService):
for chart in self.definitions:
if chart == 'workers':
lines = self.definitions[chart]['lines']
- lines[0] = ["idle_servers", 'idle']
- lines[1] = ["busy_servers", 'busy']
+ lines[0] = ['idle_servers', 'idle']
+ lines[1] = ['busy_servers', 'busy']
opts = self.definitions[chart]['options']
opts[1] = opts[1].replace('apache', 'lighttpd')
opts[4] = opts[4].replace('apache', 'lighttpd')
@@ -120,7 +123,7 @@ class Service(UrlService):
data = dict()
for row in raw_data.split('\n'):
- tmp = row.split(":")
+ tmp = row.split(':')
if tmp[0] in ASSIGNMENT:
try:
data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
diff --git a/conf.d/python.d/apache.conf b/collectors/python.d.plugin/apache/apache.conf
index 3bbc3f786..8b606f7e0 100644
--- a/conf.d/python.d/apache.conf
+++ b/collectors/python.d.plugin/apache/apache.conf
@@ -84,4 +84,4 @@ localipv4:
localipv6:
name : 'local'
- url : 'http://::1/server-status?auto'
+ url : 'http://[::1]/server-status?auto'
diff --git a/collectors/python.d.plugin/beanstalk/Makefile.inc b/collectors/python.d.plugin/beanstalk/Makefile.inc
new file mode 100644
index 000000000..4bbb7087d
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += beanstalk/beanstalk.chart.py
+dist_pythonconfig_DATA += beanstalk/beanstalk.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += beanstalk/README.md beanstalk/Makefile.inc
+
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
new file mode 100644
index 000000000..c2d7d5787
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -0,0 +1,103 @@
+# beanstalk
+
+This module provides server and tube-level statistics:
+
+**Requirements:**
+ * `python-beanstalkc`
+
+**Server statistics:**
+
+1. **CPU usage** in cpu time
+ * user
+ * system
+
+2. **Jobs rate** in jobs/s
+ * total
+ * timeouts
+
+3. **Connections rate** in connections/s
+ * connections
+
+4. **Commands rate** in commands/s
+ * put
+ * peek
+ * peek-ready
+ * peek-delayed
+ * peek-buried
+ * reserve
+ * use
+ * watch
+ * ignore
+ * delete
+ * release
+ * bury
+ * kick
+ * stats
+ * stats-job
+ * stats-tube
+ * list-tubes
+ * list-tube-used
+ * list-tubes-watched
+ * pause-tube
+
+5. **Current tubes** in tubes
+ * tubes
+
+6. **Current jobs** in jobs
+ * urgent
+ * ready
+ * reserved
+ * delayed
+ * buried
+
+7. **Current connections** in connections
+ * written
+ * producers
+ * workers
+ * waiting
+
+8. **Binlog** in records/s
+ * written
+ * migrated
+
+9. **Uptime** in seconds
+ * uptime
+
+**Per tube statistics:**
+
+1. **Jobs rate** in jobs/s
+ * jobs
+
+2. **Jobs** in jobs
+ * using
+ * ready
+ * reserved
+ * delayed
+ * buried
+
+3. **Connections** in connections
+ * using
+ * waiting
+ * watching
+
+4. **Commands** in commands/s
+ * deletes
+ * pauses
+
+5. **Pause** in seconds
+ * since
+ * left
+
+
+### configuration
+
+Sample:
+
+```yaml
+host : '127.0.0.1'
+port : 11300
+```
+
+If no configuration is given, the module will attempt to connect to beanstalkd at `127.0.0.1:11300`.
+
+---
diff --git a/python.d/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
index 8880afdd9..1472b4e1a 100644
--- a/python.d/beanstalk.chart.py
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: beanstalk netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
try:
import beanstalkc
@@ -8,13 +9,8 @@ try:
except ImportError:
BEANSTALKC = False
-try:
- import yaml
- YAML = True
-except ImportError:
- YAML = False
-
from bases.FrameworkServices.SimpleService import SimpleService
+from bases.loaders import safe_load
# default module values (can be overridden per job in `config`)
# update_every = 2
@@ -114,12 +110,13 @@ CHARTS = {
def tube_chart_template(name):
- order = ['{0}_jobs_rate'.format(name),
- '{0}_jobs'.format(name),
- '{0}_connections'.format(name),
- '{0}_commands'.format(name),
- '{0}_pause'.format(name)
- ]
+ order = [
+ '{0}_jobs_rate'.format(name),
+ '{0}_jobs'.format(name),
+ '{0}_connections'.format(name),
+ '{0}_commands'.format(name),
+ '{0}_pause'.format(name)
+ ]
family = 'tube {0}'.format(name)
charts = {
@@ -127,7 +124,8 @@ def tube_chart_template(name):
'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'],
'lines': [
['_'.join([name, 'total-jobs']), 'jobs', 'incremental']
- ]},
+ ]
+ },
order[1]: {
'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'],
'lines': [
@@ -136,27 +134,30 @@ def tube_chart_template(name):
['_'.join([name, 'current-jobs-reserved']), 'reserved'],
['_'.join([name, 'current-jobs-delayed']), 'delayed'],
['_'.join([name, 'current-jobs-buried']), 'buried']
- ]},
+ ]
+ },
order[2]: {
'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'],
'lines': [
['_'.join([name, 'current-using']), 'using'],
['_'.join([name, 'current-waiting']), 'waiting'],
['_'.join([name, 'current-watching']), 'watching']
- ]},
+ ]
+ },
order[3]: {
'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'],
'lines': [
['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'],
['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental']
- ]},
+ ]
+ },
order[4]: {
'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'],
'lines': [
['_'.join([name, 'pause']), 'since'],
['_'.join([name, 'pause-time-left']), 'left']
- ]}
-
+ ]
+ }
}
return order, charts
@@ -176,10 +177,6 @@ class Service(SimpleService):
self.error("'beanstalkc' module is needed to use beanstalk.chart.py")
return False
- if not YAML:
- self.error("'yaml' module is needed to use beanstalk.chart.py")
- return False
-
self.conn = self.connect()
return True if self.conn else False
@@ -231,7 +228,7 @@ class Service(SimpleService):
return beanstalkc.Connection(host=host,
port=port,
connect_timeout=timeout,
- parse_yaml=yaml.load)
+ parse_yaml=safe_load)
except beanstalkc.SocketError as error:
self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error))
return None
diff --git a/conf.d/python.d/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf
index 940801877..940801877 100644
--- a/conf.d/python.d/beanstalk.conf
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf
diff --git a/collectors/python.d.plugin/bind_rndc/Makefile.inc b/collectors/python.d.plugin/bind_rndc/Makefile.inc
new file mode 100644
index 000000000..72f391492
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += bind_rndc/bind_rndc.chart.py
+dist_pythonconfig_DATA += bind_rndc/bind_rndc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += bind_rndc/README.md bind_rndc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
new file mode 100644
index 000000000..688297ab3
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -0,0 +1,60 @@
+# bind_rndc
+
+This module parses the bind dump file to collect real-time performance metrics.
+
+**Requirements:**
+ * Version of bind must be 9.6 or newer
+ * Netdata must have permissions to run `rndc stats`
+
+It produces:
+
+1. **Name server statistics**
+ * requests
+ * responses
+ * success
+ * auth_answer
+ * nonauth_answer
+ * nxrrset
+ * failure
+ * nxdomain
+ * recursion
+ * duplicate
+ * rejections
+
+2. **Incoming queries**
+ * RESERVED0
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * MX
+ * TXT
+ * X25
+ * AAAA
+ * SRV
+ * NAPTR
+ * A6
+ * DS
+ * RRSIG
+ * DNSKEY
+ * SPF
+ * ANY
+ * DLV
+
+3. **Outgoing queries**
+ * Same as Incoming queries
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ named_stats_path : '/var/log/bind/named.stats'
+```
+
+If no configuration is given, the module will attempt to read the named.stats file at `/var/log/bind/named.stats`.
+
+---
diff --git a/python.d/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
index cc96659b2..423232f65 100644
--- a/python.d/bind_rndc.chart.py
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: bind rndc netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
@@ -35,56 +36,50 @@ CHARTS = {
['nms_dropped_queries', 'dropped_queries', 'incremental'],
]},
'incoming_queries': {
- 'options': [None, 'Incoming Queries', 'queries', 'incoming queries',
- 'bind_rndc.incoming_queries', 'line'],
+ 'options': [None, 'Incoming Queries', 'queries', 'incoming queries', 'bind_rndc.incoming_queries', 'line'],
'lines': [
]},
'outgoing_queries': {
- 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries',
- 'bind_rndc.outgoing_queries', 'line'],
+ 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries', 'bind_rndc.outgoing_queries', 'line'],
'lines': [
]},
'named_stats_size': {
- 'options': [None, 'Named Stats File Size', 'MB', 'file size',
- 'bind_rndc.stats_size', 'line'],
+ 'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'],
'lines': [
['stats_size', None, 'absolute', 1, 1 << 20]
- ]}
+ ]
+ }
}
NMS = {
- 'nms_requests':
- ['IPv4 requests received',
- 'IPv6 requests received',
- 'TCP requests received',
- 'requests with EDNS(0) receive'],
- 'nms_responses':
- ['responses sent',
- 'truncated responses sent',
- 'responses with EDNS(0) sent',
- 'requests with unsupported EDNS version received'],
- 'nms_failure':
- ['other query failures',
- 'queries resulted in SERVFAIL'],
- 'nms_auth_answer':
- ['queries resulted in authoritative answer'],
- 'nms_non_auth_answer':
- ['queries resulted in non authoritative answer'],
- 'nms_nxrrset':
- ['queries resulted in nxrrset'],
- 'nms_success':
- ['queries resulted in successful answer'],
- 'nms_nxdomain':
- ['queries resulted in NXDOMAIN'],
- 'nms_recursion':
- ['queries caused recursion'],
- 'nms_duplicate':
- ['duplicate queries received'],
- 'nms_rejected_queries':
- ['auth queries rejected',
- 'recursive queries rejected'],
- 'nms_dropped_queries':
- ['queries dropped']
+ 'nms_requests': [
+ 'IPv4 requests received',
+ 'IPv6 requests received',
+ 'TCP requests received',
+ 'requests with EDNS(0) receive'
+ ],
+ 'nms_responses': [
+ 'responses sent',
+ 'truncated responses sent',
+ 'responses with EDNS(0) sent',
+ 'requests with unsupported EDNS version received'
+ ],
+ 'nms_failure': [
+ 'other query failures',
+ 'queries resulted in SERVFAIL'
+ ],
+ 'nms_auth_answer': ['queries resulted in authoritative answer'],
+ 'nms_non_auth_answer': ['queries resulted in non authoritative answer'],
+ 'nms_nxrrset': ['queries resulted in nxrrset'],
+ 'nms_success': ['queries resulted in successful answer'],
+ 'nms_nxdomain': ['queries resulted in NXDOMAIN'],
+ 'nms_recursion': ['queries caused recursion'],
+ 'nms_duplicate': ['duplicate queries received'],
+ 'nms_rejected_queries': [
+ 'auth queries rejected',
+ 'recursive queries rejected'
+ ],
+ 'nms_dropped_queries': ['queries dropped']
}
STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
@@ -215,7 +210,9 @@ def parse_stats(field, named_stats):
if '[' in line:
continue
v, k = line.strip().split(' ', 1)
- data[k] = int(v)
+ if k not in data:
+ data[k] = 0
+ data[k] += int(v)
continue
break
break
diff --git a/conf.d/python.d/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
index 71958ff98..71958ff98 100644
--- a/conf.d/python.d/bind_rndc.conf
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
diff --git a/collectors/python.d.plugin/boinc/Makefile.inc b/collectors/python.d.plugin/boinc/Makefile.inc
new file mode 100644
index 000000000..319e19cfe
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += boinc/boinc.chart.py
+dist_pythonconfig_DATA += boinc/boinc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += boinc/README.md boinc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
new file mode 100644
index 000000000..595bcd3c0
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -0,0 +1,28 @@
+# boinc
+
+This module monitors task counts for the Berkeley Open Infrastructure for
+Network Computing (BOINC) distributed computing client, using the same
+RPC interface that the BOINC monitoring GUI does.
+
+It provides charts tracking the total number of tasks and active tasks,
+as well as ones tracking each of the possible states for tasks.
+
+### configuration
+
+BOINC requires the use of a password to access its RPC interface. You can
+find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
+
+By default, the module will try to auto-detect the password by looking
+in `/var/lib/boinc` for this file (this is the location most Linux
+distributions use for a system-wide BOINC installation), so things may
+just work without needing configuration for the local system.
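+
+If auto-detection does not work, a minimal local job only needs the password (the value below is a placeholder):
+
+```yaml
+local:
+  password: 'some-password'
+```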
+
+You can monitor remote systems as well:
+
+```yaml
+remote:
+ hostname: some-host
+ password: some-password
+```
+
+---
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
new file mode 100644
index 000000000..d14754c4b
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# Description: BOINC netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+from third_party import boinc_client
+
+
+ORDER = ['tasks', 'states', 'sched_states', 'process_states']
+
+CHARTS = {
+ 'tasks': {
+ 'options': [None, 'Overall Tasks', 'tasks', 'boinc', 'boinc.tasks', 'line'],
+ 'lines': [
+ ['total', 'Total', 'absolute', 1, 1],
+ ['active', 'Active', 'absolute', 1, 1]
+ ]
+ },
+ 'states': {
+ 'options': [None, 'Tasks per State', 'tasks', 'boinc', 'boinc.states', 'line'],
+ 'lines': [
+ ['new', 'New', 'absolute', 1, 1],
+ ['downloading', 'Downloading', 'absolute', 1, 1],
+ ['downloaded', 'Ready to Run', 'absolute', 1, 1],
+ ['comperror', 'Compute Errors', 'absolute', 1, 1],
+ ['uploading', 'Uploading', 'absolute', 1, 1],
+ ['uploaded', 'Uploaded', 'absolute', 1, 1],
+ ['aborted', 'Aborted', 'absolute', 1, 1],
+ ['upload_failed', 'Failed Uploads', 'absolute', 1, 1]
+ ]
+ },
+ 'sched_states': {
+ 'options': [None, 'Tasks per Scheduler State', 'tasks', 'boinc', 'boinc.sched', 'line'],
+ 'lines': [
+ ['uninit_sched', 'Uninitialized', 'absolute', 1, 1],
+ ['preempted', 'Preempted', 'absolute', 1, 1],
+ ['scheduled', 'Scheduled', 'absolute', 1, 1]
+ ]
+ },
+ 'process_states': {
+ 'options': [None, 'Tasks per Process State', 'tasks', 'boinc', 'boinc.process', 'line'],
+ 'lines': [
+ ['uninit_proc', 'Uninitialized', 'absolute', 1, 1],
+ ['executing', 'Executing', 'absolute', 1, 1],
+ ['suspended', 'Suspended', 'absolute', 1, 1],
+ ['aborting', 'Aborted', 'absolute', 1, 1],
+ ['quit', 'Quit', 'absolute', 1, 1],
+ ['copy_pending', 'Copy Pending', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# A simple template used for pre-loading the return dictionary to make
+# the _get_data() method simpler.
+_DATA_TEMPLATE = {
+ 'total': 0,
+ 'active': 0,
+ 'new': 0,
+ 'downloading': 0,
+ 'downloaded': 0,
+ 'comperror': 0,
+ 'uploading': 0,
+ 'uploaded': 0,
+ 'aborted': 0,
+ 'upload_failed': 0,
+ 'uninit_sched': 0,
+ 'preempted': 0,
+ 'scheduled': 0,
+ 'uninit_proc': 0,
+ 'executing': 0,
+ 'suspended': 0,
+ 'aborting': 0,
+ 'quit': 0,
+ 'copy_pending': 0
+}
+
+# Map task states to dimensions
+_TASK_MAP = {
+ boinc_client.ResultState.NEW: 'new',
+ boinc_client.ResultState.FILES_DOWNLOADING: 'downloading',
+ boinc_client.ResultState.FILES_DOWNLOADED: 'downloaded',
+ boinc_client.ResultState.COMPUTE_ERROR: 'comperror',
+ boinc_client.ResultState.FILES_UPLOADING: 'uploading',
+ boinc_client.ResultState.FILES_UPLOADED: 'uploaded',
+ boinc_client.ResultState.ABORTED: 'aborted',
+ boinc_client.ResultState.UPLOAD_FAILED: 'upload_failed'
+}
+
+# Map scheduler states to dimensions
+_SCHED_MAP = {
+ boinc_client.CpuSched.UNINITIALIZED: 'uninit_sched',
+ boinc_client.CpuSched.PREEMPTED: 'preempted',
+ boinc_client.CpuSched.SCHEDULED: 'scheduled',
+}
+
+# Maps process states to dimensions
+_PROC_MAP = {
+ boinc_client.Process.UNINITIALIZED: 'uninit_proc',
+ boinc_client.Process.EXECUTING: 'executing',
+ boinc_client.Process.SUSPENDED: 'suspended',
+ boinc_client.Process.ABORT_PENDING: 'aborted',
+ boinc_client.Process.QUIT_PENDING: 'quit',
+ boinc_client.Process.COPY_PENDING: 'copy_pending'
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 0)
+ self.password = self.configuration.get('password', '')
+ self.client = boinc_client.BoincClient(host=self.host, port=self.port, passwd=self.password)
+ self.alive = False
+
+ def check(self):
+ return self.connect()
+
+ def connect(self):
+ self.client.connect()
+ self.alive = self.client.connected and self.client.authorized
+ return self.alive
+
+ def reconnect(self):
+ # The client class itself actually disconnects existing
+ # connections when it is told to connect, so we don't need to
+ # explicitly disconnect when we're just trying to reconnect.
+ return self.connect()
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+ data = dict(_DATA_TEMPLATE)
+ results = []
+ try:
+ results = self.client.get_tasks()
+ except socket.error:
+ self.error('Connection is dead')
+ self.alive = False
+ return None
+ for task in results:
+ data['total'] += 1
+ data[_TASK_MAP[task.state]] += 1
+ try:
+ if task.active_task:
+ data['active'] += 1
+ data[_SCHED_MAP[task.scheduler_state]] += 1
+ data[_PROC_MAP[task.active_task_state]] += 1
+ except AttributeError:
+ pass
+ return data
diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
new file mode 100644
index 000000000..e59d2509d
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for boinc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, boinc also supports the following:
+#
+# hostname: localhost # The host running the BOINC client
+# port: 31416 # The remote GUI RPC port for BOINC
+# password: '' # The remote GUI RPC password
diff --git a/collectors/python.d.plugin/ceph/Makefile.inc b/collectors/python.d.plugin/ceph/Makefile.inc
new file mode 100644
index 000000000..15b039ef6
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ceph/ceph.chart.py
+dist_pythonconfig_DATA += ceph/ceph.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ceph/README.md ceph/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
new file mode 100644
index 000000000..29dfe5d1d
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -0,0 +1,32 @@
+# ceph
+
+This module monitors the ceph cluster usage and consumption data of a server.
+
+It produces:
+
+* Cluster statistics (usage, available, latency, objects, read/write rate)
+* OSD usage
+* OSD latency
+* Pool usage
+* Pool read/write operations
+* Pool read/write rate
+* Number of objects per pool
+
+**Requirements:**
+
+- `rados` python module
+- Read permission on the keyring file (e.g. granted to the `ceph` group):
+```shell
+# chmod 640 /etc/ceph/ceph.client.admin.keyring
+```
+
+### Configuration
+
+Sample:
+```yaml
+local:
+ config_file: '/etc/ceph/ceph.conf'
+ keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+```
+
+---
diff --git a/python.d/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
index fb78397d0..31c764d0f 100644
--- a/python.d/ceph.chart.py
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: ceph netdata python.d module
# Author: Luis Eduardo (lets00)
+# SPDX-License-Identifier: GPL-3.0-or-later
try:
import rados
@@ -8,6 +9,7 @@ try:
except ImportError:
CEPH = False
+import os
import json
from bases.FrameworkServices.SimpleService import SimpleService
@@ -16,17 +18,29 @@ update_every = 10
priority = 60000
retries = 60
-ORDER = ['general_usage', 'general_objects', 'general_bytes', 'general_operations',
- 'general_latency', 'pool_usage', 'pool_objects', 'pool_read_bytes',
- 'pool_write_bytes', 'pool_read_operations', 'pool_write_operations', 'osd_usage',
- 'osd_apply_latency', 'osd_commit_latency']
+ORDER = [
+ 'general_usage',
+ 'general_objects',
+ 'general_bytes',
+ 'general_operations',
+ 'general_latency',
+ 'pool_usage',
+ 'pool_objects',
+ 'pool_read_bytes',
+ 'pool_write_bytes',
+ 'pool_read_operations',
+ 'pool_write_operations',
+ 'osd_usage',
+ 'osd_apply_latency',
+ 'osd_commit_latency'
+]
CHARTS = {
'general_usage': {
'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'],
'lines': [
- ['general_available', 'avail', 'absolute', 1, 1024],
- ['general_usage', 'used', 'absolute', 1, 1024]
+ ['general_available', 'avail', 'absolute'],
+ ['general_usage', 'used', 'absolute']
]
},
'general_objects': {
@@ -118,6 +132,20 @@ class Service(SimpleService):
if not (self.config_file and self.keyring_file):
self.error('config_file and/or keyring_file is not defined')
return False
+
+ # Verify files and permissions
+ if not (os.access(self.config_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.keyring_file))
+ return False
+ if not (os.access(self.config_file, os.R_OK)):
+ self.error('Ceph plugin does not read {0}, define read permission.'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.R_OK)):
+ self.error('Ceph plugin does not read {0}, define read permission.'.format(self.keyring_file))
+ return False
try:
self.cluster = rados.Rados(conffile=self.config_file,
conf=dict(keyring=self.keyring_file))
@@ -148,11 +176,11 @@ class Service(SimpleService):
pool['name'],
'absolute', 1, 1024])
self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
+ pool['name'],
+ 'absolute'])
self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
+ pool['name'],
+ 'absolute'])
# OSD lines
for osd in sorted(self._get_osd_df()['nodes']):
@@ -214,16 +242,17 @@ class Service(SimpleService):
apply_latency += perf['perf_stats']['apply_latency_ms']
commit_latency += perf['perf_stats']['commit_latency_ms']
- return {'general_usage': int(status['kb_used']),
- 'general_available': int(status['kb_avail']),
- 'general_objects': int(status['num_objects']),
- 'general_read_bytes': read_bytes_sec,
- 'general_write_bytes': write_bytes_sec,
- 'general_read_operations': read_op_per_sec,
- 'general_write_operations': write_op_per_sec,
- 'general_apply_latency': apply_latency,
- 'general_commit_latency': commit_latency
- }
+ return {
+ 'general_usage': int(status['kb_used']),
+ 'general_available': int(status['kb_avail']),
+ 'general_objects': int(status['num_objects']),
+ 'general_read_bytes': read_bytes_sec,
+ 'general_write_bytes': write_bytes_sec,
+ 'general_read_operations': read_op_per_sec,
+ 'general_write_operations': write_op_per_sec,
+ 'general_apply_latency': apply_latency,
+ 'general_commit_latency': commit_latency
+ }
@staticmethod
def _get_pool_usage(pool):
@@ -247,11 +276,12 @@ class Service(SimpleService):
Get read/write kb and operations in a pool
:return: A pool dict with both read/write bytes and operations.
"""
- return {'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
- 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
- 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
- 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
- }
+ return {
+ 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
+ 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
+ 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
+ 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
+ }
@staticmethod
def _get_osd_usage(osd):
@@ -267,8 +297,10 @@ class Service(SimpleService):
Get ceph osd apply and commit latency
:return: A osd dict with osd name's key with both apply and commit latency values
"""
- return {'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
- 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']}
+ return {
+ 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
+ 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']
+ }
def _get_df(self):
"""
diff --git a/conf.d/python.d/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
index 78ac1e251..78ac1e251 100644
--- a/conf.d/python.d/ceph.conf
+++ b/collectors/python.d.plugin/ceph/ceph.conf
diff --git a/collectors/python.d.plugin/chrony/Makefile.inc b/collectors/python.d.plugin/chrony/Makefile.inc
new file mode 100644
index 000000000..18a805b12
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += chrony/chrony.chart.py
+dist_pythonconfig_DATA += chrony/chrony.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += chrony/README.md chrony/Makefile.inc
+
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
new file mode 100644
index 000000000..30636fe77
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -0,0 +1,31 @@
+# chrony
+
+This module monitors the precision and statistics of a local chronyd server.
+
+It produces:
+
+* frequency
+* last offset
+* RMS offset
+* residual freq
+* root delay
+* root dispersion
+* skew
+* system time
+
+**Requirements:**
+Verify that the `netdata` user can execute `chronyc tracking`. If necessary, update the `cmdallow` directive in `/etc/chrony.conf`.
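+A quick way to test this is to run, for example, `sudo -u netdata chronyc -n tracking` and confirm that it prints the tracking fields.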
+
+### Configuration
+
+Sample:
+```yaml
+# data collection frequency:
+update_every: 1
+
+# chrony query command:
+local:
+ command: 'chronyc -n tracking'
+```
+
+---
diff --git a/python.d/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py
index 8f331fa50..fd01d4e85 100644
--- a/python.d/chrony.chart.py
+++ b/collectors/python.d.plugin/chrony/chrony.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: chrony netdata python.d module
# Author: Dominik Schloesser (domschl)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.ExecutableService import ExecutableService
@@ -13,66 +14,70 @@ retries = 10
ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
CHARTS = {
- # id: {
- # 'options': [name, title, units, family, context, charttype],
- # 'lines': [
- # [unique_dimension_name, name, algorithm, multiplier, divisor]
- # ]}
'system': {
- 'options': [None, "Chrony System Time Deltas", "microseconds", 'system', 'chrony.system', 'area'],
+ 'options': [None, 'Chrony System Time Deltas', 'microseconds', 'system', 'chrony.system', 'area'],
'lines': [
- ['timediff', 'system time', 'absolute', 1, 1000]
- ]},
+ ['timediff', 'system time', 'absolute', 1, 1000]
+ ]
+ },
'offsets': {
- 'options': [None, "Chrony System Time Offsets", "microseconds", 'system', 'chrony.offsets', 'area'],
+ 'options': [None, 'Chrony System Time Offsets', 'microseconds', 'system', 'chrony.offsets', 'area'],
'lines': [
['lastoffset', 'last offset', 'absolute', 1, 1000],
- ['rmsoffset', 'RMS offset', 'absolute', 1, 1000]
- ]},
+ ['rmsoffset', 'RMS offset', 'absolute', 1, 1000]
+ ]
+ },
'stratum': {
- 'options': [None, "Chrony Stratum", "stratum", 'root', 'chrony.stratum', 'line'],
+ 'options': [None, 'Chrony Stratum', 'stratum', 'root', 'chrony.stratum', 'line'],
'lines': [
['stratum', None, 'absolute', 1, 1]
- ]},
+ ]
+ },
'root': {
- 'options': [None, "Chrony Root Delays", "milliseconds", 'root', 'chrony.root', 'line'],
+ 'options': [None, 'Chrony Root Delays', 'milliseconds', 'root', 'chrony.root', 'line'],
'lines': [
- ['rootdelay', 'delay', 'absolute', 1, 1000000],
+ ['rootdelay', 'delay', 'absolute', 1, 1000000],
['rootdispersion', 'dispersion', 'absolute', 1, 1000000]
- ]},
+ ]
+ },
'frequency': {
- 'options': [None, "Chrony Frequency", "ppm", 'frequencies', 'chrony.frequency', 'area'],
+ 'options': [None, 'Chrony Frequency', 'ppm', 'frequencies', 'chrony.frequency', 'area'],
'lines': [
['frequency', None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'residualfreq': {
- 'options': [None, "Chrony Residual frequency", "ppm", 'frequencies', 'chrony.residualfreq', 'area'],
+ 'options': [None, 'Chrony Residual frequency', 'ppm', 'frequencies', 'chrony.residualfreq', 'area'],
'lines': [
['residualfreq', 'residual frequency', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'skew': {
- 'options': [None, "Chrony Skew, error bound on frequency", "ppm", 'frequencies', 'chrony.skew', 'area'],
+ 'options': [None, 'Chrony Skew, error bound on frequency', 'ppm', 'frequencies', 'chrony.skew', 'area'],
'lines': [
['skew', None, 'absolute', 1, 1000]
- ]}
+ ]
+ }
}
-CHRONY = [('Frequency', 'frequency', 1e3),
- ('Last offset', 'lastoffset', 1e9),
- ('RMS offset', 'rmsoffset', 1e9),
- ('Residual freq', 'residualfreq', 1e3),
- ('Root delay', 'rootdelay', 1e9),
- ('Root dispersion', 'rootdispersion', 1e9),
- ('Skew', 'skew', 1e3),
- ('Stratum', 'stratum', 1),
- ('System time', 'timediff', 1e9)]
+CHRONY = [
+ ('Frequency', 'frequency', 1e3),
+ ('Last offset', 'lastoffset', 1e9),
+ ('RMS offset', 'rmsoffset', 1e9),
+ ('Residual freq', 'residualfreq', 1e3),
+ ('Root delay', 'rootdelay', 1e9),
+ ('Root dispersion', 'rootdispersion', 1e9),
+ ('Skew', 'skew', 1e3),
+ ('Stratum', 'stratum', 1),
+ ('System time', 'timediff', 1e9)
+]
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(
self, configuration=configuration, name=name)
- self.command = "chronyc -n tracking"
+ self.command = 'chronyc -n tracking'
self.order = ORDER
self.definitions = CHARTS
diff --git a/conf.d/python.d/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf
index 9ac906b5f..9ac906b5f 100644
--- a/conf.d/python.d/chrony.conf
+++ b/collectors/python.d.plugin/chrony/chrony.conf
diff --git a/collectors/python.d.plugin/couchdb/Makefile.inc b/collectors/python.d.plugin/couchdb/Makefile.inc
new file mode 100644
index 000000000..89dfb51c7
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += couchdb/couchdb.chart.py
+dist_pythonconfig_DATA += couchdb/couchdb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += couchdb/README.md couchdb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
new file mode 100644
index 000000000..eff8c0810
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -0,0 +1,35 @@
+# couchdb
+
+This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
+
+* Overall server reads/writes
+* HTTP traffic breakdown
+ * Request methods (`GET`, `PUT`, `POST`, etc.)
+ * Response status codes (`200`, `201`, `4xx`, etc.)
+* Active server tasks
+* Replication status (CouchDB 2.1 and up only)
+* Erlang VM stats
+* Optional per-database statistics: sizes, # of docs, # of deleted docs
+
+### Configuration
+
+Sample for a local server running on port 5984:
+```yaml
+local:
+ user: 'admin'
+ pass: 'password'
+ node: 'couchdb@127.0.0.1'
+```
+
+Be sure to specify a correct admin-level username and password.
+
+You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
+
+If you want per-database statistics, these need to be added to the configuration, separated by spaces:
+```yaml
+local:
+ ...
+ databases: 'db1 db2 db3 ...'
+```
+
+---
diff --git a/python.d/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
index 558bac587..5d6b9916f 100644
--- a/python.d/couchdb.chart.py
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
@@ -2,6 +2,7 @@
# Description: couchdb netdata python.d module
# Author: wohali <wohali@apache.org>
# Thanks to l2isbad for good examples :)
+# SPDX-License-Identifier: GPL-3.0-or-later
from collections import namedtuple, defaultdict
from json import loads
@@ -24,7 +25,7 @@ METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
OVERVIEW_STATS = [
'couchdb.database_reads.value',
'couchdb.database_writes.value',
- 'couchdb.httpd.view_reads.value'
+ 'couchdb.httpd.view_reads.value',
'couchdb.httpd_request_methods.COPY.value',
'couchdb.httpd_request_methods.DELETE.value',
'couchdb.httpd_request_methods.GET.value',
diff --git a/conf.d/python.d/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf
index 5f6e75cff..5f6e75cff 100644
--- a/conf.d/python.d/couchdb.conf
+++ b/collectors/python.d.plugin/couchdb/couchdb.conf
diff --git a/collectors/python.d.plugin/cpufreq/Makefile.inc b/collectors/python.d.plugin/cpufreq/Makefile.inc
new file mode 100644
index 000000000..d6138801d
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += cpufreq/cpufreq.chart.py
+dist_pythonconfig_DATA += cpufreq/cpufreq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpufreq/README.md cpufreq/Makefile.inc
+
diff --git a/collectors/python.d.plugin/cpufreq/README.md b/collectors/python.d.plugin/cpufreq/README.md
new file mode 100644
index 000000000..33891d59d
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/README.md
@@ -0,0 +1,30 @@
+# cpufreq
+
+This module shows the current CPU frequency as set by the cpufreq kernel
+module.
+
+**Requirement:**
+You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
+enabled in your kernel.
+
+This module tries to read from one of two possible locations. On
+initialization, it tries to read the `time_in_state` files provided by
+cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it
+falls back to using the more inaccurate `scaling_cur_freq` file (which only
+represents the **current** CPU frequency, and doesn't account for any state
+changes which happen between updates).
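+
+Roughly, the accurate path samples the cumulative `time_in_state` counters and
+weights each available frequency by the time spent in it between two samples.
+An illustrative sketch (not the module's actual code; assumes `prev`/`curr` are
+`{freq_khz: cumulative_time}` dicts read from `time_in_state`):
+
+```python
+def avg_freq_mhz(prev, curr):
+    # time spent in each frequency since the previous sample
+    deltas = {freq: curr[freq] - prev.get(freq, 0) for freq in curr}
+    total = sum(deltas.values())
+    if not total:
+        return 0.0
+    # weighted average, converted from kHz to MHz
+    return sum(freq * t for freq, t in deltas.items()) / total / 1000.0
+```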
+
+It produces one chart with multiple lines (one line per core).
+
+### configuration
+
+Sample:
+
+```yaml
+sys_dir: "/sys/devices"
+```
+
+If no configuration is given, the module will search for cpufreq files in the `/sys/devices` directory.
+The directory is also prefixed with `NETDATA_HOST_PREFIX`, if specified.
+
+---
diff --git a/python.d/cpufreq.chart.py b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
index 3abde736c..cbbab6d7f 100644
--- a/python.d/cpufreq.chart.py
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description: cpufreq netdata python.d module
-# Author: Pawel Krupa (paulfantom) and Steven Noonan (tycho)
+# Author: Pawel Krupa (paulfantom)
+# Author: Steven Noonan (tycho)
+# SPDX-License-Identifier: GPL-3.0-or-later
import glob
import os
@@ -17,7 +19,8 @@ CHARTS = {
'options': [None, 'CPU Clock', 'MHz', 'cpufreq', 'cpufreq.cpufreq', 'line'],
'lines': [
# lines are created dynamically in `check()` method
- ]}
+ ]
+ }
}
@@ -92,7 +95,7 @@ class Service(SimpleService):
self.assignment[cpu]['accurate'] = path
self.accurate_last[cpu] = {}
- if len(self.assignment) == 0:
+ if not self.assignment:
self.accurate_exists = False
for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/scaling_cur_freq'):
@@ -102,7 +105,7 @@ class Service(SimpleService):
self.assignment[cpu] = {}
self.assignment[cpu]['inaccurate'] = path
- if len(self.assignment) == 0:
+ if not self.assignment:
self.error("couldn't find a method to read cpufreq statistics")
return False
@@ -110,4 +113,3 @@ class Service(SimpleService):
self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000])
return True
-
diff --git a/conf.d/python.d/cpufreq.conf b/collectors/python.d.plugin/cpufreq/cpufreq.conf
index 0890245d9..0890245d9 100644
--- a/conf.d/python.d/cpufreq.conf
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.conf
diff --git a/collectors/python.d.plugin/cpuidle/Makefile.inc b/collectors/python.d.plugin/cpuidle/Makefile.inc
new file mode 100644
index 000000000..66c47d3cf
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += cpuidle/cpuidle.chart.py
+dist_pythonconfig_DATA += cpuidle/cpuidle.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpuidle/README.md cpuidle/Makefile.inc
+
diff --git a/collectors/python.d.plugin/cpuidle/README.md b/collectors/python.d.plugin/cpuidle/README.md
new file mode 100644
index 000000000..495169638
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/README.md
@@ -0,0 +1,11 @@
+# cpuidle
+
+This module monitors the usage of CPU idle states.
+
+**Requirement:**
+Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
+
+It produces one stacked chart per CPU, showing the percentage of time spent in
+each state.
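+
+Under the hood, the kernel exposes a `name` and a cumulative `time` (in
+microseconds) per state under `/sys/devices/system/cpu/cpu*/cpuidle/state*/`;
+percentages are derived from the deltas between two samples. An illustrative
+sketch of the raw reads (not the module's actual code):
+
+```python
+import glob
+
+def read_cpuidle_times(sys_dir='/sys/devices/system/cpu'):
+    # {state time file: cumulative time in microseconds}
+    times = {}
+    for path in glob.glob(sys_dir + '/cpu*/cpuidle/state*/time'):
+        with open(path) as state_file:
+            times[path] = int(state_file.read())
+    return times
+```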
+
+---
diff --git a/python.d/cpuidle.chart.py b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
index d14c6aaf3..feac025bf 100644
--- a/python.d/cpuidle.chart.py
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
@@ -1,14 +1,15 @@
# -*- coding: utf-8 -*-
# Description: cpuidle netdata python.d module
# Author: Steven Noonan (tycho)
+# SPDX-License-Identifier: GPL-3.0-or-later
+import ctypes
import glob
import os
import platform
from bases.FrameworkServices.SimpleService import SimpleService
-import ctypes
syscall = ctypes.CDLL('libc.so.6').syscall
# default module values (can be overridden per job in `config`)
@@ -107,7 +108,7 @@ class Service(SimpleService):
def check(self):
if self.__gettid() is None:
- self.error("Cannot get thread ID. Stats would be completely broken.")
+ self.error('Cannot get thread ID. Stats would be completely broken.')
return False
for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
@@ -140,9 +141,8 @@ class Service(SimpleService):
# Sort order by kernel-specified CPU index
self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
- if len(self.definitions) == 0:
+ if not self.definitions:
self.error("couldn't find cstate stats")
return False
return True
-
diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.conf b/collectors/python.d.plugin/cpuidle/cpuidle.conf
new file mode 100644
index 000000000..bc276fcd2
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.conf
@@ -0,0 +1,40 @@
+# netdata python.d.plugin configuration for cpuidle
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
diff --git a/collectors/python.d.plugin/dns_query_time/Makefile.inc b/collectors/python.d.plugin/dns_query_time/Makefile.inc
new file mode 100644
index 000000000..7eca3e0b6
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dns_query_time/dns_query_time.chart.py
+dist_pythonconfig_DATA += dns_query_time/dns_query_time.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dns_query_time/README.md dns_query_time/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
new file mode 100644
index 000000000..3703e8aaf
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -0,0 +1,10 @@
+# dns_query_time
+
+This module provides DNS query time statistics.
+
+**Requirement:**
+* `python-dnspython` package
+
+It produces one aggregate chart or one chart per DNS server, showing the query time.
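+
+A job configuration might look like the following (the exact keys are defined
+in `dns_query_time.conf`, which is not shown in this hunk, so treat this as an
+assumed example):
+
+```yaml
+my_job:
+  aggregate: yes
+  dns_servers: '8.8.8.8 8.8.4.4'
+  domains: 'python.org distrowatch.com linuxmint.com'
+```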
+
+---
diff --git a/python.d/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
index 9a794a9c9..d3c3db788 100644
--- a/python.d/dns_query_time.chart.py
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
@@ -1,20 +1,25 @@
# -*- coding: utf-8 -*-
# Description: dns_query_time netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from random import choice
-from threading import Thread
from socket import getaddrinfo, gaierror
+from threading import Thread
try:
from time import monotonic as time
except ImportError:
from time import time
+
try:
- import dns.message, dns.query, dns.name
+ import dns.message
+ import dns.query
+ import dns.name
DNS_PYTHON = True
except ImportError:
DNS_PYTHON = False
+
try:
from queue import Queue
except ImportError:
@@ -117,8 +122,12 @@ def check_ns(ns):
def create_charts(aggregate, server_list):
if aggregate:
order = ['dns_group']
- definitions = {'dns_group': {'options': [None, 'DNS Response Time', 'ms', 'name servers',
- 'dns_query_time.response_time', 'line'], 'lines': []}}
+ definitions = {
+ 'dns_group': {
+ 'options': [None, 'DNS Response Time', 'ms', 'name servers', 'dns_query_time.response_time', 'line'],
+ 'lines': []
+ }
+ }
for ns in server_list:
definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
@@ -127,8 +136,10 @@ def create_charts(aggregate, server_list):
order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
definitions = dict()
for ns in server_list:
- definitions[''.join(['dns_', ns.replace('.', '_')])] = {'options': [None, 'DNS Response Time', 'ms', ns,
- 'dns_query_time.response_time', 'area'],
- 'lines': [['_'.join(['ns', ns.replace('.', '_')]),
- ns, 'absolute']]}
+ definitions[''.join(['dns_', ns.replace('.', '_')])] = {
+ 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
+ 'lines': [
+ ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']
+ ]
+ }
return order, definitions
diff --git a/conf.d/python.d/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
index d32c6db83..d32c6db83 100644
--- a/conf.d/python.d/dns_query_time.conf
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
diff --git a/collectors/python.d.plugin/dnsdist/Makefile.inc b/collectors/python.d.plugin/dnsdist/Makefile.inc
new file mode 100644
index 000000000..a53f518f0
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dnsdist/dnsdist.chart.py
+dist_pythonconfig_DATA += dnsdist/dnsdist.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dnsdist/README.md dnsdist/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
new file mode 100644
index 000000000..b646ae27c
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -0,0 +1,54 @@
+# dnsdist
+
+This module monitors dnsdist performance and health metrics.
+
+The following charts are drawn:
+
+1. **Response latency**
+ * latency-slow
+ * latency100-1000
+ * latency50-100
+ * latency10-50
+ * latency1-10
+ * latency0-1
+
+2. **Cache performance**
+ * cache-hits
+ * cache-misses
+
+3. **ACL events**
+ * acl-drops
+ * rule-drop
+ * rule-nxdomain
+ * rule-refused
+
+4. **Noncompliant data**
+ * empty-queries
+ * no-policy
+ * noncompliant-queries
+ * noncompliant-responses
+
+5. **Queries**
+ * queries
+ * rdqueries
+
+6. **Health**
+ * downstream-send-errors
+ * downstream-timeouts
+ * servfail-responses
+ * trunc-failures
+
+### configuration
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:5053/jsonstat?command=stats'
+ user : 'username'
+ pass : 'password'
+ header:
+ X-API-Key: 'dnsdist-api-key'
+```
+
+---
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
new file mode 100644
index 000000000..1aff3f803
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'queries',
+ 'queries_dropped',
+ 'packets_dropped',
+ 'answers',
+ 'backend_responses',
+ 'backend_commerrors',
+ 'backend_errors',
+ 'cache',
+ 'servercpu',
+ 'servermem',
+ 'query_latency',
+ 'query_latency_avg'
+]
+
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
+ 'lines': [
+ ['queries', 'all', 'incremental'],
+ ['rdqueries', 'recursive', 'incremental'],
+ ['empty-queries', 'empty', 'incremental']
+ ]
+ },
+ 'queries_dropped': {
+ 'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'],
+ 'lines': [
+ ['rule-drop', 'rule drop', 'incremental'],
+ ['dyn-blocked', 'dynamic block', 'incremental'],
+ ['no-policy', 'no policy', 'incremental'],
+ ['noncompliant-queries', 'non compliant', 'incremental']
+ ]
+ },
+ 'packets_dropped': {
+ 'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'],
+ 'lines': [
+ ['acl-drops', 'acl', 'incremental']
+ ]
+ },
+ 'answers': {
+ 'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'],
+ 'lines': [
+ ['self-answered', 'self answered', 'incremental'],
+ ['rule-nxdomain', 'nxdomain', 'incremental', -1],
+ ['rule-refused', 'refused', 'incremental', -1],
+ ['trunc-failures', 'trunc failures', 'incremental', -1]
+ ]
+ },
+ 'backend_responses': {
+ 'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'],
+ 'lines': [
+ ['responses', 'responses', 'incremental']
+ ]
+ },
+ 'backend_commerrors': {
+ 'options': [None, 'Backend Communication Errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'],
+ 'lines': [
+ ['downstream-send-errors', 'send errors', 'incremental']
+ ]
+ },
+ 'backend_errors': {
+ 'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'],
+ 'lines': [
+ ['downstream-timeouts', 'timeout', 'incremental'],
+ ['servfail-responses', 'servfail', 'incremental'],
+ ['noncompliant-responses', 'non compliant', 'incremental']
+ ]
+ },
+ 'cache': {
+ 'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'],
+ 'lines': [
+ ['cache-hits', 'hits', 'incremental'],
+ ['cache-misses', 'misses', 'incremental', -1]
+ ]
+ },
+ 'servercpu': {
+ 'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'],
+ 'lines': [
+ ['cpu-sys-msec', 'system state', 'incremental'],
+ ['cpu-user-msec', 'user state', 'incremental']
+ ]
+ },
+ 'servermem': {
+ 'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'],
+ 'lines': [
+ ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576]
+ ]
+ },
+ 'query_latency': {
+ 'options': [None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'],
+ 'lines': [
+ ['latency0-1', '1ms', 'incremental'],
+ ['latency1-10', '10ms', 'incremental'],
+ ['latency10-50', '50ms', 'incremental'],
+ ['latency50-100', '100ms', 'incremental'],
+ ['latency100-1000', '1sec', 'incremental'],
+ ['latency-slow', 'slow', 'incremental']
+ ]
+ },
+ 'query_latency_avg': {
+ 'options': [None, 'Average latency for the last N queries', 'ms/query', 'latency',
+ 'dnsdist.query_latency_avg', 'line'],
+ 'lines': [
+ ['latency-avg100', '100', 'absolute'],
+ ['latency-avg1000', '1k', 'absolute'],
+ ['latency-avg10000', '10k', 'absolute'],
+ ['latency-avg1000000', '1000k', 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ data = self._get_raw_data()
+ if not data:
+ return None
+
+ return loads(data)
diff --git a/conf.d/python.d/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf
index aec58b8e1..aec58b8e1 100644
--- a/conf.d/python.d/dnsdist.conf
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.conf
diff --git a/collectors/python.d.plugin/dockerd/Makefile.inc b/collectors/python.d.plugin/dockerd/Makefile.inc
new file mode 100644
index 000000000..b100bc6a1
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dockerd/dockerd.chart.py
+dist_pythonconfig_DATA += dockerd/dockerd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dockerd/README.md dockerd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
new file mode 100644
index 000000000..d3f603808
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -0,0 +1,26 @@
+# dockerd
+
+This module monitors Docker health metrics.
+
+**Requirement:**
+* `docker` package
+
+The following charts are drawn:
+
+1. **running containers**
+ * count
+
+2. **healthy containers**
+ * count
+
+3. **unhealthy containers**
+ * count
+
+### configuration
+
+```yaml
+ update_every : 1
+ priority : 60000
+```
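+
+The module also accepts an optional `url` parameter pointing at the Docker
+socket (this mirrors the shipped `dockerd.conf`):
+
+```yaml
+local:
+  url: 'unix://var/run/docker.sock'
+```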
+
+---
diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
new file mode 100644
index 000000000..a0d3d7e65
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# Description: docker netdata python.d module
+# Author: Kévin Darcel (@tuxity)
+
+try:
+ import docker
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 1
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'running_containers',
+ 'healthy_containers',
+ 'unhealthy_containers'
+]
+
+CHARTS = {
+ 'running_containers': {
+ 'options': [None, 'Number of running containers', 'running containers', 'running containers',
+ 'docker.running_containers', 'line'],
+ 'lines': [
+ ['running_containers', 'running']
+ ]
+ },
+ 'healthy_containers': {
+ 'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers',
+ 'docker.healthy_containers', 'line'],
+ 'lines': [
+ ['healthy_containers', 'healthy']
+ ]
+ },
+ 'unhealthy_containers': {
+ 'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers',
+ 'docker.unhealthy_containers', 'line'],
+ 'lines': [
+ ['unhealthy_containers', 'unhealthy']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def check(self):
+ if not HAS_DOCKER:
+ self.error('\'docker\' package is needed to use docker.chart.py')
+ return False
+
+ self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
+
+ try:
+ self.client.ping()
+ except docker.errors.APIError as error:
+ self.error(error)
+ return False
+
+ return True
+
+ def get_data(self):
+ data = dict()
+ data['running_containers'] = len(self.client.containers.list(sparse=True))
+ data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
+ data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))
+
+ return data or None
diff --git a/collectors/python.d.plugin/dockerd/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf
new file mode 100644
index 000000000..5ef17a1f5
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/dockerd.conf
@@ -0,0 +1,79 @@
+# netdata python.d.plugin configuration for dockerd health data API
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 10 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, dockerd plugin also supports the following:
+#
+# url: '<scheme>://<host>:<port>/<health_page_api>'
+# # http://localhost:8080/health
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ url: 'unix://var/run/docker.sock'
diff --git a/collectors/python.d.plugin/dovecot/Makefile.inc b/collectors/python.d.plugin/dovecot/Makefile.inc
new file mode 100644
index 000000000..fd7d13bbb
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dovecot/dovecot.chart.py
+dist_pythonconfig_DATA += dovecot/dovecot.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dovecot/README.md dovecot/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
new file mode 100644
index 000000000..50950ecc1
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -0,0 +1,73 @@
+# dovecot
+
+This module provides statistics from a Dovecot server.
+Statistics are read from the Dovecot stats socket by sending the `EXPORT global` command.
+More information about Dovecot stats can be found on the [project wiki page](http://wiki2.dovecot.org/Statistics).
+
+**Requirement:**
+A Dovecot UNIX socket with R/W permissions for the netdata user, or Dovecot configured with a TCP/IP socket.
+
+The module provides the following charts:
+
+1. **sessions**
+ * active sessions
+
+2. **logins**
+ * logins
+
+3. **commands** - number of IMAP commands
+ * commands
+
+4. **Faults**
+ * minor
+ * major
+
+5. **Context Switches**
+ * voluntary
+ * involuntary
+
+6. **disk** in bytes/s
+ * read
+ * write
+
+7. **bytes** in bytes/s
+ * read
+ * write
+
+8. **number of syscalls** in syscalls/s
+ * read
+ * write
+
+9. **lookups** - number of lookups per second
+ * path
+ * attr
+
+10. **hits** - number of cache hits
+ * hits
+
+11. **attempts** - authorization attempts
+ * success
+ * failure
+
+12. **cache** - cached authorization hits
+ * hit
+ * miss
+
+### configuration
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+```
+
+If no configuration is given, the module will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`.
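+
+For reference, a minimal sketch of the exchange the module performs against the
+stats socket (illustrative only; the module's `SocketService` base class handles
+the connection details, and a single `recv` is used here for brevity):
+
+```python
+import socket
+
+def dovecot_export_global(path='/var/run/dovecot/stats'):
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    sock.connect(path)
+    sock.sendall(b'EXPORT\tglobal\r\n')
+    raw = sock.recv(65536).decode()
+    sock.close()
+    # the reply is a tab-separated header line followed by a values line
+    header, values = raw.split('\n')[:2]
+    return dict(zip(header.split('\t'), values.split('\t')))
+```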
+
+---
diff --git a/python.d/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
index 5689f2ec9..7fee3bfac 100644
--- a/python.d/dovecot.chart.py
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: dovecot netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.SocketService import SocketService
@@ -10,93 +11,113 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['sessions', 'logins', 'commands',
- 'faults',
- 'context_switches',
- 'io', 'net', 'syscalls',
- 'lookup', 'cache',
- 'auth', 'auth_cache']
+ORDER = [
+ 'sessions',
+ 'logins',
+ 'commands',
+ 'faults',
+ 'context_switches',
+ 'io',
+ 'net',
+ 'syscalls',
+ 'lookup',
+ 'cache',
+ 'auth',
+ 'auth_cache'
+]
CHARTS = {
'sessions': {
- 'options': [None, "Dovecot Active Sessions", 'number', 'sessions', 'dovecot.sessions', 'line'],
+ 'options': [None, 'Dovecot Active Sessions', 'number', 'sessions', 'dovecot.sessions', 'line'],
'lines': [
['num_connected_sessions', 'active sessions', 'absolute']
- ]},
+ ]
+ },
'logins': {
- 'options': [None, "Dovecot Logins", 'number', 'logins', 'dovecot.logins', 'line'],
+ 'options': [None, 'Dovecot Logins', 'number', 'logins', 'dovecot.logins', 'line'],
'lines': [
['num_logins', 'logins', 'absolute']
- ]},
+ ]
+ },
'commands': {
- 'options': [None, "Dovecot Commands", "commands", 'commands', 'dovecot.commands', 'line'],
+ 'options': [None, 'Dovecot Commands', 'commands', 'commands', 'dovecot.commands', 'line'],
'lines': [
['num_cmds', 'commands', 'absolute']
- ]},
+ ]
+ },
'faults': {
- 'options': [None, "Dovecot Page Faults", "faults", 'page faults', 'dovecot.faults', 'line'],
+ 'options': [None, 'Dovecot Page Faults', 'faults', 'page faults', 'dovecot.faults', 'line'],
'lines': [
['min_faults', 'minor', 'absolute'],
['maj_faults', 'major', 'absolute']
- ]},
+ ]
+ },
'context_switches': {
- 'options': [None, "Dovecot Context Switches", '', 'context switches', 'dovecot.context_switches', 'line'],
+ 'options': [None, 'Dovecot Context Switches', '', 'context switches', 'dovecot.context_switches', 'line'],
'lines': [
['vol_cs', 'voluntary', 'absolute'],
['invol_cs', 'involuntary', 'absolute']
- ]},
+ ]
+ },
'io': {
- 'options': [None, "Dovecot Disk I/O", 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
+ 'options': [None, 'Dovecot Disk I/O', 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
'lines': [
['disk_input', 'read', 'incremental', 1, 1024],
['disk_output', 'write', 'incremental', -1, 1024]
- ]},
+ ]
+ },
'net': {
- 'options': [None, "Dovecot Network Bandwidth", 'kilobits/s', 'network', 'dovecot.net', 'area'],
+ 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
'lines': [
['read_bytes', 'read', 'incremental', 8, 1024],
['write_bytes', 'write', 'incremental', -8, 1024]
- ]},
+ ]
+ },
'syscalls': {
- 'options': [None, "Dovecot Number of SysCalls", 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
+ 'options': [None, 'Dovecot Number of SysCalls', 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
'lines': [
['read_count', 'read', 'incremental'],
['write_count', 'write', 'incremental']
- ]},
+ ]
+ },
'lookup': {
- 'options': [None, "Dovecot Lookups", 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
+ 'options': [None, 'Dovecot Lookups', 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
'lines': [
['mail_lookup_path', 'path', 'incremental'],
['mail_lookup_attr', 'attr', 'incremental']
- ]},
+ ]
+ },
'cache': {
- 'options': [None, "Dovecot Cache Hits", 'hits/s', 'cache', 'dovecot.cache', 'line'],
+ 'options': [None, 'Dovecot Cache Hits', 'hits/s', 'cache', 'dovecot.cache', 'line'],
'lines': [
['mail_cache_hits', 'hits', 'incremental']
- ]},
+ ]
+ },
'auth': {
- 'options': [None, "Dovecot Authentications", 'attempts', 'logins', 'dovecot.auth', 'stacked'],
+ 'options': [None, 'Dovecot Authentications', 'attempts', 'logins', 'dovecot.auth', 'stacked'],
'lines': [
['auth_successes', 'ok', 'absolute'],
['auth_failures', 'failed', 'absolute']
- ]},
+ ]
+ },
'auth_cache': {
- 'options': [None, "Dovecot Authentication Cache", 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
+ 'options': [None, 'Dovecot Authentication Cache', 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
'lines': [
['auth_cache_hits', 'hit', 'absolute'],
['auth_cache_misses', 'miss', 'absolute']
- ]}
+ ]
+ }
}
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
- self.request = "EXPORT\tglobal\r\n"
+ self.request = 'EXPORT\tglobal\r\n'
self.host = None # localhost
self.port = None # 24242
# self._keep_alive = True
- self.unix_socket = "/var/run/dovecot/stats"
+ self.unix_socket = '/var/run/dovecot/stats'
self.order = ORDER
self.definitions = CHARTS
@@ -111,7 +132,7 @@ class Service(SocketService):
return None
if raw is None:
- self.debug("dovecot returned no data")
+ self.debug('dovecot returned no data')
return None
data = raw.split('\n')[:2]
diff --git a/conf.d/python.d/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf
index 56c394991..56c394991 100644
--- a/conf.d/python.d/dovecot.conf
+++ b/collectors/python.d.plugin/dovecot/dovecot.conf
diff --git a/collectors/python.d.plugin/elasticsearch/Makefile.inc b/collectors/python.d.plugin/elasticsearch/Makefile.inc
new file mode 100644
index 000000000..15c63c2fa
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += elasticsearch/elasticsearch.chart.py
+dist_pythonconfig_DATA += elasticsearch/elasticsearch.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += elasticsearch/README.md elasticsearch/Makefile.inc
+
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
new file mode 100644
index 000000000..75e17015b
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -0,0 +1,60 @@
+# elasticsearch
+
+This module monitors Elasticsearch performance and health metrics.
+
+It produces:
+
+1. **Search performance** charts:
+ * Number of queries, fetches
+ * Time spent on queries, fetches
+ * Query and fetch latency
+
+2. **Indexing performance** charts:
+ * Number of documents indexed, index refreshes, flushes
+ * Time spent on indexing, refreshing, flushing
+ * Indexing and flushing latency
+
+3. **Memory usage and garbage collection** charts:
+ * JVM heap currently in use, committed
+ * Count of garbage collections
+ * Time spent on garbage collections
+
+4. **Host metrics** charts:
+ * Available file descriptors in percent
+ * Opened HTTP connections
+ * Cluster communication transport metrics
+
+5. **Queues and rejections** charts:
+ * Number of queued/rejected threads in thread pool
+
+6. **Fielddata cache** charts:
+ * Fielddata cache size
+ * Fielddata evictions and circuit breaker tripped count
+
+7. **Cluster health API** charts:
+ * Cluster status
+ * Nodes and tasks statistics
+ * Shards statistics
+
+8. **Cluster stats API** charts:
+ * Nodes statistics
+ * Query cache statistics
+ * Docs statistics
+ * Store statistics
+ * Indices and shards statistics
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ host : 'ipaddress' # Server ip address or hostname
+ port : 9200 # Port on which elasticsearch listens
+ cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default.
+ cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default.
+```
+
+If no configuration is given, the module will fail to run.
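+
+Note that the latency charts are not reported by Elasticsearch directly; they
+are derived from the cumulative counters between two collections, roughly as
+sketched below (an illustrative simplification of the module's `find_avg`):
+
+```python
+def avg_latency_ms(prev_total, curr_total, prev_time_ms, curr_time_ms):
+    # mean time per operation since the previous collection
+    operations = curr_total - prev_total
+    if operations <= 0:
+        return 0.0
+    return (curr_time_ms - prev_time_ms) / float(operations)
+```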
+
+---
diff --git a/python.d/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
index 9c2c58944..3f431f6e0 100644
--- a/python.d/elasticsearch.chart.py
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
@@ -1,11 +1,14 @@
# -*- coding: utf-8 -*-
# Description: elastic search node stats netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+import threading
from collections import namedtuple
-from json import loads
from socket import gethostbyname, gaierror
-from threading import Thread
+
try:
from queue import Queue
except ImportError:
@@ -15,8 +18,6 @@ from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
update_every = 5
-priority = 60000
-retries = 60
METHODS = namedtuple('METHODS', ['get_data', 'url', 'run'])
@@ -63,6 +64,8 @@ NODE_STATS = [
'jvm.buffer_pools.mapped.total_capacity_in_bytes',
'thread_pool.bulk.queue',
'thread_pool.bulk.rejected',
+ 'thread_pool.write.queue',
+ 'thread_pool.write.rejected',
'thread_pool.index.queue',
'thread_pool.index.rejected',
'thread_pool.search.queue',
@@ -107,30 +110,62 @@ HEALTH_STATS = [
]
LATENCY = {
- 'query_latency':
- {'total': 'indices_search_query_total',
- 'spent_time': 'indices_search_query_time_in_millis'},
- 'fetch_latency':
- {'total': 'indices_search_fetch_total',
- 'spent_time': 'indices_search_fetch_time_in_millis'},
- 'indexing_latency':
- {'total': 'indices_indexing_index_total',
- 'spent_time': 'indices_indexing_index_time_in_millis'},
- 'flushing_latency':
- {'total': 'indices_flush_total',
- 'spent_time': 'indices_flush_total_time_in_millis'}
+ 'query_latency': {
+ 'total': 'indices_search_query_total',
+ 'spent_time': 'indices_search_query_time_in_millis'
+ },
+ 'fetch_latency': {
+ 'total': 'indices_search_fetch_total',
+ 'spent_time': 'indices_search_fetch_time_in_millis'
+ },
+ 'indexing_latency': {
+ 'total': 'indices_indexing_index_total',
+ 'spent_time': 'indices_indexing_index_time_in_millis'
+ },
+ 'flushing_latency': {
+ 'total': 'indices_flush_total',
+ 'spent_time': 'indices_flush_total_time_in_millis'
+ }
}
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['search_performance_total', 'search_performance_current', 'search_performance_time',
- 'search_latency', 'index_performance_total', 'index_performance_current', 'index_performance_time',
- 'index_latency', 'index_translog_operations', 'index_translog_size', 'index_segments_count', 'index_segments_memory_writer',
- 'index_segments_memory', 'jvm_mem_heap', 'jvm_mem_heap_bytes', 'jvm_buffer_pool_count',
- 'jvm_direct_buffers_memory', 'jvm_mapped_buffers_memory', 'jvm_gc_count', 'jvm_gc_time', 'host_metrics_file_descriptors',
- 'host_metrics_http', 'host_metrics_transport', 'thread_pool_queued', 'thread_pool_rejected',
- 'fielddata_cache', 'fielddata_evictions_tripped', 'cluster_health_status', 'cluster_health_nodes',
- 'cluster_health_shards', 'cluster_stats_nodes', 'cluster_stats_query_cache', 'cluster_stats_docs',
- 'cluster_stats_store', 'cluster_stats_indices_shards']
+ORDER = [
+ 'search_performance_total',
+ 'search_performance_current',
+ 'search_performance_time',
+ 'search_latency',
+ 'index_performance_total',
+ 'index_performance_current',
+ 'index_performance_time',
+ 'index_latency',
+ 'index_translog_operations',
+ 'index_translog_size',
+ 'index_segments_count',
+ 'index_segments_memory_writer',
+ 'index_segments_memory',
+ 'jvm_mem_heap',
+ 'jvm_mem_heap_bytes',
+ 'jvm_buffer_pool_count',
+ 'jvm_direct_buffers_memory',
+ 'jvm_mapped_buffers_memory',
+ 'jvm_gc_count',
+ 'jvm_gc_time',
+ 'host_metrics_file_descriptors',
+ 'host_metrics_http',
+ 'host_metrics_transport',
+ 'thread_pool_queued',
+ 'thread_pool_rejected',
+ 'fielddata_cache',
+ 'fielddata_evictions_tripped',
+ 'cluster_health_status',
+ 'cluster_health_nodes',
+ 'cluster_health_shards',
+ 'cluster_stats_nodes',
+ 'cluster_stats_query_cache',
+ 'cluster_stats_docs',
+ 'cluster_stats_store',
+ 'cluster_stats_indices_shards',
+]
CHARTS = {
'search_performance_total': {
@@ -139,27 +174,31 @@ CHARTS = {
'lines': [
['indices_search_query_total', 'queries', 'incremental'],
['indices_search_fetch_total', 'fetches', 'incremental']
- ]},
+ ]
+ },
'search_performance_current': {
'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
'elastic.search_performance_current', 'stacked'],
'lines': [
['indices_search_query_current', 'queries', 'absolute'],
['indices_search_fetch_current', 'fetches', 'absolute']
- ]},
+ ]
+ },
'search_performance_time': {
'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
'elastic.search_performance_time', 'stacked'],
'lines': [
['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000],
['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'search_latency': {
'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'],
'lines': [
['query_latency', 'query', 'absolute', 1, 1000],
['fetch_latency', 'fetch', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'index_performance_total': {
'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
'indexing performance', 'elastic.index_performance_total', 'stacked'],
@@ -167,13 +206,15 @@ CHARTS = {
['indices_indexing_index_total', 'indexed', 'incremental'],
['indices_refresh_total', 'refreshes', 'incremental'],
['indices_flush_total', 'flushes', 'incremental']
- ]},
+ ]
+ },
'index_performance_current': {
'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
'indexing performance', 'elastic.index_performance_current', 'stacked'],
'lines': [
['indices_indexing_index_current', 'documents', 'absolute']
- ]},
+ ]
+ },
'index_performance_time': {
'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
'elastic.index_performance_time', 'stacked'],
@@ -181,40 +222,46 @@ CHARTS = {
['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'index_latency': {
'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
'elastic.index_latency', 'stacked'],
'lines': [
['indexing_latency', 'indexing', 'absolute', 1, 1000],
['flushing_latency', 'flushing', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'index_translog_operations': {
'options': [None, 'Translog Operations', 'count', 'translog',
'elastic.index_translog_operations', 'area'],
'lines': [
['indices_translog_operations', 'total', 'absolute'],
['indices_translog_uncommitted_operations', 'uncommited', 'absolute']
- ]},
+ ]
+ },
'index_translog_size': {
'options': [None, 'Translog Size', 'MB', 'translog',
'elastic.index_translog_size', 'area'],
'lines': [
['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048567],
['indices_translog_uncommitted_size_in_bytes', 'uncommited', 'absolute', 1, 1048567]
- ]},
+ ]
+ },
'index_segments_count': {
'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments',
'elastic.index_segments_count', 'line'],
'lines': [
['indices_segments_count', 'segments', 'absolute']
- ]},
+ ]
+ },
'index_segments_memory_writer': {
'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments',
'elastic.index_segments_memory_writer', 'area'],
'lines': [
['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048567]
- ]},
+ ]
+ },
'index_segments_memory': {
'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments',
'elastic.index_segments_memory', 'stacked'],
@@ -227,84 +274,98 @@ CHARTS = {
['indices_segments_doc_values_memory_in_bytes', 'doc values', 'absolute', 1, 1048567],
['indices_segments_version_map_memory_in_bytes', 'version map', 'absolute', 1, 1048567],
['indices_segments_fixed_bit_set_memory_in_bytes', 'fixed bit set', 'absolute', 1, 1048567]
- ]},
+ ]
+ },
'jvm_mem_heap': {
'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc',
'elastic.jvm_heap', 'area'],
'lines': [
['jvm_mem_heap_used_percent', 'inuse', 'absolute']
- ]},
+ ]
+ },
'jvm_mem_heap_bytes': {
'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc',
'elastic.jvm_heap_bytes', 'area'],
'lines': [
['jvm_mem_heap_committed_in_bytes', 'commited', 'absolute', 1, 1048576],
['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'jvm_buffer_pool_count': {
'options': [None, 'JVM Buffers', 'count', 'memory usage and gc',
'elastic.jvm_buffer_pool_count', 'line'],
'lines': [
['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
['jvm_buffer_pools_mapped_count', 'mapped', 'absolute']
- ]},
+ ]
+ },
'jvm_direct_buffers_memory': {
'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc',
'elastic.jvm_direct_buffers_memory', 'area'],
'lines': [
['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048567],
['jvm_buffer_pools_direct_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048567]
- ]},
+ ]
+ },
'jvm_mapped_buffers_memory': {
'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc',
'elastic.jvm_mapped_buffers_memory', 'area'],
'lines': [
['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048567],
['jvm_buffer_pools_mapped_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048567]
- ]},
+ ]
+ },
'jvm_gc_count': {
'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
'lines': [
['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
- ]},
+ ]
+ },
'jvm_gc_time': {
'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc',
'elastic.gc_time', 'stacked'],
'lines': [
['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental']
- ]},
+ ]
+ },
'thread_pool_queued': {
'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
'elastic.thread_pool_queued', 'stacked'],
'lines': [
['thread_pool_bulk_queue', 'bulk', 'absolute'],
+ ['thread_pool_write_queue', 'write', 'absolute'],
['thread_pool_index_queue', 'index', 'absolute'],
['thread_pool_search_queue', 'search', 'absolute'],
['thread_pool_merge_queue', 'merge', 'absolute']
- ]},
+ ]
+ },
'thread_pool_rejected': {
'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
'elastic.thread_pool_rejected', 'stacked'],
'lines': [
['thread_pool_bulk_rejected', 'bulk', 'absolute'],
+ ['thread_pool_write_rejected', 'write', 'absolute'],
['thread_pool_index_rejected', 'index', 'absolute'],
['thread_pool_search_rejected', 'search', 'absolute'],
['thread_pool_merge_rejected', 'merge', 'absolute']
- ]},
+ ]
+ },
'fielddata_cache': {
'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
'lines': [
['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'fielddata_evictions_tripped': {
'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
'lines': [
['indices_fielddata_evictions', 'evictions', 'incremental'],
['indices_fielddata_tripped', 'tripped', 'incremental']
- ]},
+ ]
+ },
'cluster_health_nodes': {
'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
'elastic.cluster_health_nodes', 'stacked'],
@@ -313,7 +374,8 @@ CHARTS = {
['number_of_data_nodes', 'data_nodes', 'absolute'],
['number_of_pending_tasks', 'pending_tasks', 'absolute'],
['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
- ]},
+ ]
+ },
'cluster_health_status': {
'options': [None, 'Cluster Status', 'status', 'cluster health API',
'elastic.cluster_health_status', 'area'],
@@ -324,7 +386,8 @@ CHARTS = {
['status_foo2', None, 'absolute'],
['status_foo3', None, 'absolute'],
['status_yellow', 'yellow', 'absolute']
- ]},
+ ]
+ },
'cluster_health_shards': {
'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
'elastic.cluster_health_shards', 'stacked'],
@@ -335,7 +398,8 @@ CHARTS = {
['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
['initializing_shards', 'initializing', 'absolute'],
['active_shards_percent_as_number', 'active_percent', 'absolute']
- ]},
+ ]
+ },
'cluster_stats_nodes': {
'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
'elastic.cluster_nodes', 'stacked'],
@@ -345,52 +409,60 @@ CHARTS = {
['nodes_count_total', 'total', 'absolute'],
['nodes_count_master_only', 'master_only', 'absolute'],
['nodes_count_client', 'client', 'absolute']
- ]},
+ ]
+ },
'cluster_stats_query_cache': {
'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
'elastic.cluster_query_cache', 'stacked'],
'lines': [
['indices_query_cache_hit_count', 'hit', 'incremental'],
['indices_query_cache_miss_count', 'miss', 'incremental']
- ]},
+ ]
+ },
'cluster_stats_docs': {
'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
'elastic.cluster_docs', 'line'],
'lines': [
['indices_docs_count', 'docs', 'absolute']
- ]},
+ ]
+ },
'cluster_stats_store': {
'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
'elastic.cluster_store', 'line'],
'lines': [
['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048567]
- ]},
+ ]
+ },
'cluster_stats_indices_shards': {
'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
'elastic.cluster_indices_shards', 'stacked'],
'lines': [
['indices_count', 'indices', 'absolute'],
['indices_shards_total', 'shards', 'absolute']
- ]},
+ ]
+ },
'host_metrics_transport': {
'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
'elastic.host_transport', 'area'],
'lines': [
['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
- ]},
+ ]
+ },
'host_metrics_file_descriptors': {
'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
'elastic.host_descriptors', 'area'],
'lines': [
['file_descriptors_used', 'used', 'absolute', 1, 10]
- ]},
+ ]
+ },
'host_metrics_http': {
'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
'elastic.host_http_connections', 'line'],
'lines': [
['http_current_open', 'opened', 'absolute', 1, 1]
- ]}
+ ]
+ }
}
@@ -444,8 +516,8 @@ class Service(UrlService):
for method in self.methods:
if not method.run:
continue
- th = Thread(target=method.get_data,
- args=(queue, method.url))
+ th = threading.Thread(target=method.get_data,
+ args=(queue, method.url))
th.start()
threads.append(th)
@@ -466,7 +538,11 @@ class Service(UrlService):
if not raw_data:
return queue.put(dict())
- data = loads(raw_data)
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
to_netdata = fetch_data_(raw_data=data,
metrics=HEALTH_STATS)
@@ -488,7 +564,11 @@ class Service(UrlService):
if not raw_data:
return queue.put(dict())
- data = loads(raw_data)
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
to_netdata = fetch_data_(raw_data=data,
metrics=CLUSTER_STATS)
@@ -505,7 +585,10 @@ class Service(UrlService):
if not raw_data:
return queue.put(dict())
- data = loads(raw_data)
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
node = list(data['nodes'].keys())[0]
to_netdata = fetch_data_(raw_data=data['nodes'][node],
@@ -525,6 +608,13 @@ class Service(UrlService):
return queue.put(to_netdata)
+ def json_reply(self, reply):
+ try:
+ return json.loads(reply)
+ except ValueError as err:
+ self.error(err)
+ return None
+
def find_avg(self, total, spent_time, key):
if key not in self.latency:
self.latency[key] = dict(total=total,
diff --git a/conf.d/python.d/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
index 213843bf9..213843bf9 100644
--- a/conf.d/python.d/elasticsearch.conf
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
diff --git a/collectors/python.d.plugin/example/Makefile.inc b/collectors/python.d.plugin/example/Makefile.inc
new file mode 100644
index 000000000..1b027d5a7
--- /dev/null
+++ b/collectors/python.d.plugin/example/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += example/example.chart.py
+dist_pythonconfig_DATA += example/example.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += example/README.md example/Makefile.inc
+
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
new file mode 100644
index 000000000..f9f314ac4
--- /dev/null
+++ b/collectors/python.d.plugin/example/README.md
@@ -0,0 +1 @@
+An example python data collection module. \ No newline at end of file
diff --git a/python.d/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
index ee7ff62fc..85defa4d1 100644
--- a/python.d/example.chart.py
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: example netdata python.d module
-# Author: Pawel Krupa (paulfantom)
+# Author: Put your name here (your github login)
+# SPDX-License-Identifier: GPL-3.0-or-later
from random import SystemRandom
diff --git a/conf.d/python.d/example.conf b/collectors/python.d.plugin/example/example.conf
index e7fed9b50..e7fed9b50 100644
--- a/conf.d/python.d/example.conf
+++ b/collectors/python.d.plugin/example/example.conf
diff --git a/collectors/python.d.plugin/exim/Makefile.inc b/collectors/python.d.plugin/exim/Makefile.inc
new file mode 100644
index 000000000..36ffa56d2
--- /dev/null
+++ b/collectors/python.d.plugin/exim/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += exim/exim.chart.py
+dist_pythonconfig_DATA += exim/exim.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += exim/README.md exim/Makefile.inc
+
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
new file mode 100644
index 000000000..b9a62cad9
--- /dev/null
+++ b/collectors/python.d.plugin/exim/README.md
@@ -0,0 +1,13 @@
+# exim
+
+A simple module that executes `exim -bpc` to fetch the exim queue size.
+This command can take a long time to complete, so running it every second is not recommended.
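+
+Since `exim -bpc` prints a single number (the count of queued messages), the
+whole collection step reduces to something like this illustrative sketch:
+
+```python
+import subprocess
+
+def exim_queue_size():
+    out = subprocess.check_output(['exim', '-bpc'])
+    return {'emails': int(out.decode().strip())}
+```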
+
+It produces only one chart:
+
+1. **Exim Queue Emails**
+ * emails
+
+Configuration is not needed.
+
+---
diff --git a/python.d/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
index 2e5b924ba..5431dd46b 100644
--- a/python.d/exim.chart.py
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: exim netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.ExecutableService import ExecutableService
@@ -14,17 +15,18 @@ ORDER = ['qemails']
CHARTS = {
'qemails': {
- 'options': [None, "Exim Queue Emails", "emails", 'queue', 'exim.qemails', 'line'],
+ 'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'],
'lines': [
['emails', None, 'absolute']
- ]}
+ ]
+ }
}
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = "exim -bpc"
+ self.command = 'exim -bpc'
self.order = ORDER
self.definitions = CHARTS
diff --git a/conf.d/python.d/exim.conf b/collectors/python.d.plugin/exim/exim.conf
index 2add7b2cb..2add7b2cb 100644
--- a/conf.d/python.d/exim.conf
+++ b/collectors/python.d.plugin/exim/exim.conf
diff --git a/collectors/python.d.plugin/fail2ban/Makefile.inc b/collectors/python.d.plugin/fail2ban/Makefile.inc
new file mode 100644
index 000000000..31e117e53
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += fail2ban/fail2ban.chart.py
+dist_pythonconfig_DATA += fail2ban/fail2ban.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += fail2ban/README.md fail2ban/Makefile.inc
+
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
new file mode 100644
index 000000000..2ab021965
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -0,0 +1,23 @@
+# fail2ban
+
+Module monitors the fail2ban log file to show all bans for all active jails.
+
+**Requirements:**
+ * fail2ban.log file MUST BE readable by netdata (a good idea is to add **create 0640 root netdata** to the fail2ban conf in logrotate.d)
+
+It produces one chart with multiple lines (one line per jail).
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ log_path: '/var/log/fail2ban.log'
+ conf_path: '/etc/fail2ban/jail.local'
+ exclude: 'dropbear apache'
+```
+If no configuration is given, the module will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.
+If the conf file is not found, the default jail is `ssh`.
+
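+The module also parses jail override files found in `conf_dir` (default: `/etc/fail2ban/jail.d/`). The option can be set explicitly, for example:
+
+```yaml
+local:
+  log_path: '/var/log/fail2ban.log'
+  conf_path: '/etc/fail2ban/jail.local'
+  conf_dir: '/etc/fail2ban/jail.d/'
+```
+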
+---
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
new file mode 100644
index 000000000..954689008
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+# Description: fail2ban log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+import os
+
+from collections import defaultdict
+from glob import glob
+
+from bases.FrameworkServices.LogService import LogService
+
+
+ORDER = [
+ 'jails_bans',
+ 'jails_in_jail',
+]
+
+
+def charts(jails):
+ """
+    Create chart definitions for the given jails.
+ """
+
+ ch = {
+ ORDER[0]: {
+ 'options': [None, 'Jails Ban Rate', 'bans/s', 'bans', 'jail.bans', 'line'],
+ 'lines': []
+ },
+ ORDER[1]: {
+ 'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs', 'in jail',
+ 'jail.in_jail', 'line'],
+ 'lines': []
+ },
+ }
+ for jail in jails:
+ ch[ORDER[0]]['lines'].append([jail, jail, 'incremental'])
+ ch[ORDER[1]]['lines'].append(['{0}_in_jail'.format(jail), jail, 'absolute'])
+
+ return ch
+
+
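+# Matches config sections like (jail.local):
+# [ssh]
+# enabled = true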
+RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
+
+# Example:
+# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban) (?P<ip>[a-f0-9.:]+)')
+
+DEFAULT_JAILS = [
+ 'ssh',
+]
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ LogService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = dict()
+
+ self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
+ self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
+ self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
+ self.exclude = self.configuration.get('exclude', str())
+
+ self.monitoring_jails = list()
+ self.banned_ips = defaultdict(set)
+ self.data = dict()
+
+ def check(self):
+ """
+ :return: bool
+ """
+ if not self.conf_path.endswith(('.conf', '.local')):
+            self.error('{0} is not a valid conf path, must end with .conf or .local'.format(self.conf_path))
+ return False
+
+ if not os.access(self.log_path, os.R_OK):
+ self.error('{0} is not readable'.format(self.log_path))
+ return False
+
+ if os.path.getsize(self.log_path) == 0:
+ self.error('{0} is empty'.format(self.log_path))
+ return False
+
+ self.monitoring_jails = self.jails_auto_detection()
+ for jail in self.monitoring_jails:
+ self.data[jail] = 0
+ self.data['{0}_in_jail'.format(jail)] = 0
+
+ self.definitions = charts(self.monitoring_jails)
+ self.info('monitoring jails: {0}'.format(self.monitoring_jails))
+
+ return True
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ raw = self._get_raw_data()
+
+ if not raw:
+ return None if raw is None else self.data
+
+ for row in raw:
+ match = RE_DATA.search(row)
+
+ if not match:
+ continue
+
+ match = match.groupdict()
+
+ if match['jail'] not in self.monitoring_jails:
+ continue
+
+ jail, action, ip = match['jail'], match['action'], match['ip']
+
+ if action == 'Ban':
+ self.data[jail] += 1
+ if ip not in self.banned_ips[jail]:
+ self.banned_ips[jail].add(ip)
+ self.data['{0}_in_jail'.format(jail)] += 1
+ else:
+ if ip in self.banned_ips[jail]:
+ self.banned_ips[jail].remove(ip)
+ self.data['{0}_in_jail'.format(jail)] -= 1
+
+ return self.data
+
+ def get_files_from_dir(self, dir_path, suffix):
+ """
+ :return: list
+ """
+ if not os.path.isdir(dir_path):
+ self.error('{0} is not a directory'.format(dir_path))
+ return list()
+
+        return glob('{0}/*.{1}'.format(dir_path, suffix))
+
+ def get_jails_from_file(self, file_path):
+ """
+ :return: list
+ """
+ if not os.access(file_path, os.R_OK):
+            self.error('{0} is not readable or does not exist'.format(file_path))
+ return list()
+
+ with open(file_path, 'rt') as f:
+ lines = f.readlines()
+ raw = ' '.join(line for line in lines if line.startswith(('[', 'enabled')))
+
+ match = RE_JAILS.findall(raw)
+ # Result: [('ssh', 'true'), ('dropbear', 'true'), ('pam-generic', 'true'), ...]
+
+ if not match:
+ self.debug('{0} parse failed'.format(file_path))
+ return list()
+
+ return match
+
+ def jails_auto_detection(self):
+ """
+ :return: list
+
+ Parses jail configuration files. Returns list of enabled jails.
+    According to `man jail.conf`, the parse order must be:
+ * jail.conf
+ * jail.d/*.conf (in alphabetical order)
+ * jail.local
+ * jail.d/*.local (in alphabetical order)
+ """
+ jails_files, all_jails, active_jails = list(), list(), list()
+
+        jails_files.append('{0}.conf'.format(self.conf_path.rsplit('.', 1)[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'conf'))
+        jails_files.append('{0}.local'.format(self.conf_path.rsplit('.', 1)[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'local'))
+
+ self.debug('config files to parse: {0}'.format(jails_files))
+
+ for f in jails_files:
+ all_jails.extend(self.get_jails_from_file(f))
+
+ exclude = self.exclude.split()
+
+ for name, status in all_jails:
+ if name in exclude:
+ continue
+
+ if status == 'true' and name not in active_jails:
+ active_jails.append(name)
+ elif status == 'false' and name in active_jails:
+ active_jails.remove(name)
+
+ return active_jails or DEFAULT_JAILS
diff --git a/conf.d/python.d/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
index 60ca87231..60ca87231 100644
--- a/conf.d/python.d/fail2ban.conf
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf
diff --git a/collectors/python.d.plugin/freeradius/Makefile.inc b/collectors/python.d.plugin/freeradius/Makefile.inc
new file mode 100644
index 000000000..54aa6492f
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += freeradius/freeradius.chart.py
+dist_pythonconfig_DATA += freeradius/freeradius.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += freeradius/README.md freeradius/Makefile.inc
+
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
new file mode 100644
index 000000000..e5fe88ec3
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -0,0 +1,70 @@
+# freeradius
+
+Uses the `radclient` command to provide FreeRADIUS statistics. It is not recommended to run it every second.
+
+It produces:
+
+1. **Authentication counters:**
+ * access-accepts
+ * access-rejects
+ * auth-dropped-requests
+ * auth-duplicate-requests
+ * auth-invalid-requests
+ * auth-malformed-requests
+ * auth-unknown-types
+
+2. **Accounting counters:** [optional]
+ * accounting-requests
+ * accounting-responses
+ * acct-dropped-requests
+ * acct-duplicate-requests
+ * acct-invalid-requests
+ * acct-malformed-requests
+ * acct-unknown-types
+
+3. **Proxy authentication counters:** [optional]
+ * proxy-access-accepts
+ * proxy-access-rejects
+ * proxy-auth-dropped-requests
+ * proxy-auth-duplicate-requests
+ * proxy-auth-invalid-requests
+ * proxy-auth-malformed-requests
+ * proxy-auth-unknown-types
+
+4. **Proxy accounting counters:** [optional]
+ * proxy-accounting-requests
+ * proxy-accounting-responses
+ * proxy-acct-dropped-requests
+ * proxy-acct-duplicate-requests
+ * proxy-acct-invalid-requests
+ * proxy-acct-malformed-requests
+ * proxy-acct-unknown-types
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ host : 'localhost'
+ port : '18121'
+ secret : 'adminsecret'
+ acct : False # Freeradius accounting statistics.
+ proxy_auth : False # Freeradius proxy authentication statistics.
+ proxy_acct : False # Freeradius proxy accounting statistics.
+```
+
+**Freeradius server configuration:**
+
+The configuration for the status server is automatically created in the sites-available directory.
+By default, the server is enabled and can be queried from every client.
+FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+
+To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+
+```
+cd sites-enabled
+ln -s ../sites-available/status status
+```
+
+and restart/reload your FreeRADIUS server.
+
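+To verify that the status server responds, you can query it manually with `radclient` (a sketch; adjust host, port and secret to your setup):
+
+```
+echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15" | radclient -x 127.0.0.1:18121 status adminsecret
+```
+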
+---
diff --git a/python.d/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
index 3acc58d1a..3126831b7 100644
--- a/python.d/freeradius.chart.py
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: freeradius netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from re import findall
from subprocess import Popen, PIPE
@@ -20,7 +21,7 @@ ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
CHARTS = {
'authentication': {
- 'options': [None, "Authentication", "packets/s", 'Authentication', 'freerad.auth', 'line'],
+ 'options': [None, 'Authentication', 'packets/s', 'Authentication', 'freerad.auth', 'line'],
'lines': [
['access-accepts', None, 'incremental'],
['access-rejects', None, 'incremental'],
@@ -29,9 +30,10 @@ CHARTS = {
['auth-invalid-requests', 'invalid-requests', 'incremental'],
['auth-malformed-requests', 'malformed-requests', 'incremental'],
['auth-unknown-types', 'unknown-types', 'incremental']
- ]},
+ ]
+ },
'accounting': {
- 'options': [None, "Accounting", "packets/s", 'Accounting', 'freerad.acct', 'line'],
+ 'options': [None, 'Accounting', 'packets/s', 'Accounting', 'freerad.acct', 'line'],
'lines': [
['accounting-requests', 'requests', 'incremental'],
['accounting-responses', 'responses', 'incremental'],
@@ -40,9 +42,10 @@ CHARTS = {
['acct-invalid-requests', 'invalid-requests', 'incremental'],
['acct-malformed-requests', 'malformed-requests', 'incremental'],
['acct-unknown-types', 'unknown-types', 'incremental']
- ]},
+ ]
+ },
'proxy-auth': {
- 'options': [None, "Proxy Authentication", "packets/s", 'Authentication', 'freerad.proxy.auth', 'line'],
+ 'options': [None, 'Proxy Authentication', 'packets/s', 'Authentication', 'freerad.proxy.auth', 'line'],
'lines': [
['proxy-access-accepts', 'access-accepts', 'incremental'],
['proxy-access-rejects', 'access-rejects', 'incremental'],
@@ -51,9 +54,10 @@ CHARTS = {
['proxy-auth-invalid-requests', 'invalid-requests', 'incremental'],
['proxy-auth-malformed-requests', 'malformed-requests', 'incremental'],
['proxy-auth-unknown-types', 'unknown-types', 'incremental']
- ]},
+ ]
+ },
'proxy-acct': {
- 'options': [None, "Proxy Accounting", "packets/s", 'Accounting', 'freerad.proxy.acct', 'line'],
+ 'options': [None, 'Proxy Accounting', 'packets/s', 'Accounting', 'freerad.proxy.acct', 'line'],
'lines': [
['proxy-accounting-requests', 'requests', 'incremental'],
['proxy-accounting-responses', 'responses', 'incremental'],
@@ -62,8 +66,8 @@ CHARTS = {
['proxy-acct-invalid-requests', 'invalid-requests', 'incremental'],
['proxy-acct-malformed-requests', 'malformed-requests', 'incremental'],
['proxy-acct-unknown-types', 'unknown-types', 'incremental']
- ]}
-
+ ]
+ }
}
@@ -105,7 +109,7 @@ class Service(SimpleService):
"""
result = self._get_raw_data()
return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
-
+
def _get_raw_data(self):
"""
The following code is equivalent to
diff --git a/conf.d/python.d/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf
index 3336d4c49..3336d4c49 100644
--- a/conf.d/python.d/freeradius.conf
+++ b/collectors/python.d.plugin/freeradius/freeradius.conf
diff --git a/collectors/python.d.plugin/go_expvar/Makefile.inc b/collectors/python.d.plugin/go_expvar/Makefile.inc
new file mode 100644
index 000000000..74f50d765
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += go_expvar/go_expvar.chart.py
+dist_pythonconfig_DATA += go_expvar/go_expvar.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += go_expvar/README.md go_expvar/Makefile.inc
+
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
new file mode 100644
index 000000000..6309c195f
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -0,0 +1,276 @@
+# go_expvar
+
+The `go_expvar` module can monitor any Go application that exposes its metrics with the use of
+the `expvar` package from the Go standard library.
+
+`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts.
+
+For the memory statistics, it produces the following charts:
+
+1. **Heap allocations** in kB
+ * alloc: size of objects allocated on the heap
+ * inuse: size of allocated heap spans
+
+2. **Stack allocations** in kB
+ * inuse: size of allocated stack spans
+
+3. **MSpan allocations** in kB
+ * inuse: size of allocated mspan structures
+
+4. **MCache allocations** in kB
+ * inuse: size of allocated mcache structures
+
+5. **Virtual memory** in kB
+ * sys: size of reserved virtual address space
+
+6. **Live objects**
+ * live: number of live objects in memory
+
+7. **GC pauses average** in ns
+ * avg: average duration of all GC stop-the-world pauses
+
+
+## Monitoring Go Applications
+
+Netdata can be used to monitor running Go applications that expose their metrics with
+the use of the [expvar package](https://golang.org/pkg/expvar/) included in the Go standard library.
+
+The `expvar` package exposes these metrics over HTTP and is very easy to use.
+Consider the minimal example below:
+
+```go
+package main
+
+import (
+ _ "expvar"
+ "net/http"
+)
+
+func main() {
+ http.ListenAndServe("127.0.0.1:8080", nil)
+}
+```
+
+When imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that
+exposes the Go runtime's memory statistics in JSON format. You can inspect the output by opening
+the URL in your browser (or by using `wget` or `curl`).
+
+Sample output:
+
+```json
+{
+"cmdline": ["./expvar-demo-binary"],
+"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <ommited for brevity>}
+}
+```
+
+You can of course expose and monitor your own variables as well.
+Here is a sample Go application that exposes a few custom variables:
+
+```go
+package main
+
+import (
+ "expvar"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func main() {
+
+ tick := time.NewTicker(1 * time.Second)
+ num_go := expvar.NewInt("runtime.goroutines")
+ counters := expvar.NewMap("counters")
+ counters.Set("cnt1", new(expvar.Int))
+ counters.Set("cnt2", new(expvar.Float))
+
+ go http.ListenAndServe(":8080", nil)
+
+ for {
+ select {
+ case <- tick.C:
+ num_go.Set(int64(runtime.NumGoroutine()))
+ counters.Add("cnt1", 1)
+ counters.AddFloat("cnt2", 1.452)
+ }
+ }
+}
+```
+
+Apart from the runtime memory stats, this application publishes two counters and the
+number of currently running goroutines, updating these stats every second.
+
+In the next section, we will cover how to monitor and chart these exposed stats with
+the use of netdata's `go_expvar` module.
+
+### Using netdata go_expvar module
+
+The `go_expvar` module is disabled by default. To enable it, edit [`python.d.conf`](../python.d.conf)
+(to edit it on your system run `/etc/netdata/edit-config python.d.conf`), and change the `go_expvar`
+variable to `yes`:
+
+```
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+...
+go_expvar: yes
+...
+```
+
+Next, we need to edit the module configuration file, found at [`/etc/netdata/python.d/go_expvar.conf`](go_expvar.conf) by default
+(to edit it on your system, run `/etc/netdata/edit-config python.d/go_expvar.conf`).
+The module configuration consists of jobs, where each job can be used to monitor a separate Go application.
+Let's see a sample job configuration:
+
+```
+# /etc/netdata/python.d/go_expvar.conf
+
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts: {}
+```
+
+Let's go over each of the defined options:
+
+ name: 'app1'
+
+This is the job name that will appear on the netdata dashboard.
+If not defined, the job_name (top level key) will be used.
+
+ url: 'http://127.0.0.1:8080/debug/vars'
+
+This is the URL of the expvar endpoint. As the expvar handler can be installed
+in a custom path, the whole URL has to be specified. This value is mandatory.
+
+ collect_memstats: true
+
+Whether to enable collecting stats about the Go runtime's memory. You can find more
+information about the exposed values at the [runtime package docs](https://golang.org/pkg/runtime/#MemStats).
+
+ extra_charts: {}
+
+Enables the user to specify custom expvars to monitor and chart.
+This is explained in more detail below.
+
+**Note: if `collect_memstats` is disabled and no `extra_charts` are defined, the plugin will
+disable itself, as there will be no data to collect!**
+
+Apart from these options, each job supports options inherited from netdata's `python.d.plugin`
+and its base `UrlService` class. These are:
+
+ update_every: 1 # the job's data collection frequency
+ priority: 60000 # the job's order on the dashboard
+ retries: 60 # the job's number of restoration attempts
+ user: admin # use when the expvar endpoint is protected by HTTP Basic Auth
+ password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth
+
+### Monitoring custom vars with go_expvar
+
+Now, memory stats might be useful, but what if you want netdata to monitor some custom values
+that your Go application exposes? The `go_expvar` module can do that as well with the use of
+the `extra_charts` configuration variable.
+
+The `extra_charts` variable is a YAML list of netdata chart definitions.
+Each chart definition has the following keys:
+
+ id: netdata chart ID
+ options: a key-value mapping of chart options
+ lines: a list of line definitions
+
+**Note: please do not use dots in the chart or line ID field.
+See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**
+
+Please see these two links to the official netdata documentation for more information about the values:
+
+- [External plugins - charts](../../plugins.d/#chart)
+- [Chart variables](https://github.com/netdata/netdata/wiki/How-to-write-new-module#global-variables-order-and-chart)
+
+**Line definitions**
+
+Each chart can define multiple lines (dimensions).
+A line definition is a key-value mapping of line options.
+Each line can have the following options:
+
+ # mandatory
+ expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
+ expvar_type: value type; supported are "float" or "int"
+ id: the id of this line/dimension in netdata
+
+ # optional - netdata defaults are used if these options are not defined
+ name: ''
+ algorithm: absolute
+ multiplier: 1
+ divisor: 100 if expvar_type == float, 1 if expvar_type == int
+ hidden: False
+
+Please see the following link for more information about the options and their default values:
+[External plugins - dimensions](../../plugins.d/#dimension)
+
+Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.
+All dicts in the resulting JSON document are flattened to one level, and expvar names are
+joined together with '.' when flattening; see the sketch after the example below.
+
+Example:
+```
+{
+ "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
+ "runtime.goroutines": 5
+}
+```
+
+In the above case, the exported variables will be available under `runtime.goroutines`,
+`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
+the first defined key wins and all subsequent keys with the same name are ignored.
+
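+A minimal Python sketch of this flattening behaviour (illustrative only, not the module's actual code):
+
+```python
+def flatten(d, prefix=''):
+    # join nested keys with '.'; on a collision the first defined key wins
+    out = {}
+    for k, v in d.items():
+        key = '{0}.{1}'.format(prefix, k) if prefix else k
+        if isinstance(v, dict):
+            for fk, fv in flatten(v, key).items():
+                out.setdefault(fk, fv)
+        else:
+            out.setdefault(key, v)
+    return out
+```
+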
+**Configuration example**
+
+The configuration below matches the second Go application described above.
+Netdata will monitor and chart memory stats for the application, as well as a custom chart of
+running goroutines and two dummy counters.
+
+```
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts:
+ - id: "runtime_goroutines"
+ options:
+ name: num_goroutines
+ title: "runtime: number of goroutines"
+ units: goroutines
+ family: runtime
+ context: expvar.runtime.goroutines
+ chart_type: line
+ lines:
+ - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+ - id: "foo_counters"
+ options:
+ name: counters
+ title: "some random counters"
+ units: awesomeness
+ family: counters
+ context: expvar.foo.counters
+ chart_type: line
+ lines:
+ - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+ - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+```
+
+**Netdata charts example**
+
+The images below show how the final charts look in netdata.
+
+![Memory stats charts](https://cloud.githubusercontent.com/assets/15180106/26762052/62b4af58-493b-11e7-9e69-146705acfc2c.png)
+
+![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png)
+
diff --git a/python.d/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
index cbd462570..76e8b72ec 100644
--- a/python.d/go_expvar.chart.py
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: go_expvar netdata python.d module
# Author: Jan Kral (kralewitz)
+# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import division
import json
@@ -20,43 +21,50 @@ MEMSTATS_CHARTS = {
'lines': [
['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_stack': {
'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats',
'expvar.memstats.stack', 'line'],
'lines': [
['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_mspan': {
'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats',
'expvar.memstats.mspan', 'line'],
'lines': [
['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_mcache': {
'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats',
'expvar.memstats.mcache', 'line'],
'lines': [
['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_live_objects': {
'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
'expvar.memstats.live_objects', 'line'],
'lines': [
['memstats_live_objects', 'live']
- ]},
+ ]
+ },
'memstats_sys': {
'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats',
'expvar.memstats.sys', 'line'],
'lines': [
['memstats_sys', 'sys', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'memstats_gc_pauses': {
'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
'expvar.memstats.gc_pauses', 'line'],
'lines': [
['memstats_gc_pauses', 'avg']
- ]},
+ ]
+ }
}
MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
diff --git a/conf.d/python.d/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
index c352b1674..ba8922d2e 100644
--- a/conf.d/python.d/go_expvar.conf
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf
@@ -76,7 +76,7 @@
#
# Please visit the module wiki page for more information on how to use the extra_charts variable:
#
-# https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#monitoring-custom-vars-with-go_expvar
+# https://github.com/netdata/netdata/wiki/Monitoring-Go-Applications#monitoring-custom-vars-with-go_expvar
#
# Configuration example
# ---------------------
diff --git a/collectors/python.d.plugin/haproxy/Makefile.inc b/collectors/python.d.plugin/haproxy/Makefile.inc
new file mode 100644
index 000000000..ad24deaa0
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += haproxy/haproxy.chart.py
+dist_pythonconfig_DATA += haproxy/haproxy.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += haproxy/README.md haproxy/Makefile.inc
+
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
new file mode 100644
index 000000000..4bff25670
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -0,0 +1,49 @@
+# haproxy
+
+Module monitors frontend and backend metrics such as bytes in, bytes out, current sessions and sessions currently in queue,
+as well as health metrics such as backend server status (server checks should be enabled).
+
+The plugin can obtain data from a URL **OR** a UNIX socket.
+
+**Requirement:**
+The socket MUST be readable AND writable by the netdata user.
+
+It produces:
+
+1. **Frontend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+2. **Backend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+3. **Health** chart
+ * number of failed servers for every backend (in DOWN state)
+
+
+### configuration
+
+Sample:
+
+```yaml
+via_url:
+ user : 'username' # ONLY IF stats auth is used
+  pass : 'password' # ONLY IF stats auth is used
+ url : 'http://ip.address:port/url;csv;norefresh'
+```
+
+OR
+
+```yaml
+via_socket:
+ socket : 'path/to/haproxy/sock'
+```
+
+If no configuration is given, the module will fail to run.
+
+---
diff --git a/python.d/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
index 3061f5ef2..a46689f50 100644
--- a/python.d/haproxy.chart.py
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: haproxy netdata python.d module
# Author: l2isbad, ktarasz
+# SPDX-License-Identifier: GPL-3.0-or-later
from collections import defaultdict
from re import compile as re_compile
@@ -20,155 +21,185 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['fbin', 'fbout', 'fscur', 'fqcur',
- 'fhrsp_1xx', 'fhrsp_2xx', 'fhrsp_3xx', 'fhrsp_4xx', 'fhrsp_5xx', 'fhrsp_other', 'fhrsp_total',
- 'bbin', 'bbout', 'bscur', 'bqcur',
- 'bhrsp_1xx', 'bhrsp_2xx', 'bhrsp_3xx', 'bhrsp_4xx', 'bhrsp_5xx', 'bhrsp_other', 'bhrsp_total',
- 'bqtime', 'bttime', 'brtime', 'bctime',
- 'health_sup', 'health_sdown', 'health_bdown', 'health_idle']
+ORDER = [
+ 'fbin',
+ 'fbout',
+ 'fscur',
+ 'fqcur',
+ 'fhrsp_1xx',
+ 'fhrsp_2xx',
+ 'fhrsp_3xx',
+ 'fhrsp_4xx',
+ 'fhrsp_5xx',
+ 'fhrsp_other',
+ 'fhrsp_total',
+ 'bbin',
+ 'bbout',
+ 'bscur',
+ 'bqcur',
+ 'bhrsp_1xx',
+ 'bhrsp_2xx',
+ 'bhrsp_3xx',
+ 'bhrsp_4xx',
+ 'bhrsp_5xx',
+ 'bhrsp_other',
+ 'bhrsp_total',
+ 'bqtime',
+ 'bttime',
+ 'brtime',
+ 'bctime',
+ 'health_sup',
+ 'health_sdown',
+ 'health_bdown',
+ 'health_idle'
+]
CHARTS = {
'fbin': {
- 'options': [None, "Kilobytes In", "KB/s", 'frontend', 'haproxy_f.bin', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Kilobytes In', 'KB/s', 'frontend', 'haproxy_f.bin', 'line'],
+ 'lines': []
+ },
'fbout': {
- 'options': [None, "Kilobytes Out", "KB/s", 'frontend', 'haproxy_f.bout', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Kilobytes Out', 'KB/s', 'frontend', 'haproxy_f.bout', 'line'],
+ 'lines': []
+ },
'fscur': {
- 'options': [None, "Sessions Active", "sessions", 'frontend', 'haproxy_f.scur', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Sessions Active', 'sessions', 'frontend', 'haproxy_f.scur', 'line'],
+ 'lines': []
+ },
'fqcur': {
- 'options': [None, "Session In Queue", "sessions", 'frontend', 'haproxy_f.qcur', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Session In Queue', 'sessions', 'frontend', 'haproxy_f.qcur', 'line'],
+ 'lines': []
+ },
'fhrsp_1xx': {
- 'options': [None, "HTTP responses with 1xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
+ 'lines': []
+ },
'fhrsp_2xx': {
- 'options': [None, "HTTP responses with 2xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
+ 'lines': []
+ },
'fhrsp_3xx': {
- 'options': [None, "HTTP responses with 3xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
+ 'lines': []
+ },
'fhrsp_4xx': {
- 'options': [None, "HTTP responses with 4xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
+ 'lines': []
+ },
'fhrsp_5xx': {
- 'options': [None, "HTTP responses with 5xx code", "responses/s", 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
+ 'lines': []
+ },
'fhrsp_other': {
- 'options': [None, "HTTP responses with other codes (protocol error)", "responses/s", 'frontend', 'haproxy_f.hrsp_other', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'frontend',
+ 'haproxy_f.hrsp_other', 'line'],
+ 'lines': []
+ },
'fhrsp_total': {
- 'options': [None, "HTTP responses", "responses", 'frontend', 'haproxy_f.hrsp_total', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses', 'responses', 'frontend', 'haproxy_f.hrsp_total', 'line'],
+ 'lines': []
+ },
'bbin': {
- 'options': [None, "Kilobytes In", "KB/s", 'backend', 'haproxy_b.bin', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Kilobytes In', 'KB/s', 'backend', 'haproxy_b.bin', 'line'],
+ 'lines': []
+ },
'bbout': {
- 'options': [None, "Kilobytes Out", "KB/s", 'backend', 'haproxy_b.bout', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Kilobytes Out', 'KB/s', 'backend', 'haproxy_b.bout', 'line'],
+ 'lines': []
+ },
'bscur': {
- 'options': [None, "Sessions Active", "sessions", 'backend', 'haproxy_b.scur', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Sessions Active', 'sessions', 'backend', 'haproxy_b.scur', 'line'],
+ 'lines': []
+ },
'bqcur': {
- 'options': [None, "Sessions In Queue", "sessions", 'backend', 'haproxy_b.qcur', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Sessions In Queue', 'sessions', 'backend', 'haproxy_b.qcur', 'line'],
+ 'lines': []
+ },
'bhrsp_1xx': {
- 'options': [None, "HTTP responses with 1xx code", "responses/s", 'backend', 'haproxy_b.hrsp_1xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_1xx', 'line'],
+ 'lines': []
+ },
'bhrsp_2xx': {
- 'options': [None, "HTTP responses with 2xx code", "responses/s", 'backend', 'haproxy_b.hrsp_2xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_2xx', 'line'],
+ 'lines': []
+ },
'bhrsp_3xx': {
- 'options': [None, "HTTP responses with 3xx code", "responses/s", 'backend', 'haproxy_b.hrsp_3xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_3xx', 'line'],
+ 'lines': []
+ },
'bhrsp_4xx': {
- 'options': [None, "HTTP responses with 4xx code", "responses/s", 'backend', 'haproxy_b.hrsp_4xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_4xx', 'line'],
+ 'lines': []
+ },
'bhrsp_5xx': {
- 'options': [None, "HTTP responses with 5xx code", "responses/s", 'backend', 'haproxy_b.hrsp_5xx', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_5xx', 'line'],
+ 'lines': []
+ },
'bhrsp_other': {
- 'options': [None, "HTTP responses with other codes (protocol error)", "responses/s", 'backend',
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'backend',
'haproxy_b.hrsp_other', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'bhrsp_total': {
- 'options': [None, "HTTP responses (total)", "responses/s", 'backend', 'haproxy_b.hrsp_total', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'HTTP responses (total)', 'responses/s', 'backend', 'haproxy_b.hrsp_total', 'line'],
+ 'lines': []
+ },
'bqtime': {
- 'options': [None, "The average queue time over the 1024 last requests", "ms", 'backend', 'haproxy_b.qtime', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'The average queue time over the 1024 last requests', 'ms', 'backend',
+ 'haproxy_b.qtime', 'line'],
+ 'lines': []
+ },
'bctime': {
- 'options': [None, "The average connect time over the 1024 last requests", "ms", 'backend',
+ 'options': [None, 'The average connect time over the 1024 last requests', 'ms', 'backend',
'haproxy_b.ctime', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'brtime': {
- 'options': [None, "The average response time over the 1024 last requests", "ms", 'backend',
+ 'options': [None, 'The average response time over the 1024 last requests', 'ms', 'backend',
'haproxy_b.rtime', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'bttime': {
- 'options': [None, "The average total session time over the 1024 last requests", "ms", 'backend',
+ 'options': [None, 'The average total session time over the 1024 last requests', 'ms', 'backend',
'haproxy_b.ttime', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'health_sdown': {
- 'options': [None, "Backend Servers In DOWN State", "failed servers", 'health',
+ 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health',
'haproxy_hs.down', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'health_sup': {
- 'options': [None, "Backend Servers In UP State", "health servers", 'health',
+ 'options': [None, 'Backend Servers In UP State', 'health servers', 'health',
'haproxy_hs.up', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'health_bdown': {
- 'options': [None, "Is Backend Alive? 1 = DOWN", "failed backend", 'health', 'haproxy_hb.down', 'line'],
- 'lines': [
- ]},
+ 'options': [None, 'Is Backend Alive? 1 = DOWN', 'failed backend', 'health', 'haproxy_hb.down', 'line'],
+ 'lines': []
+ },
'health_idle': {
- 'options': [None, "The Ratio Of Polling Time Vs Total Time", "percent", 'health', 'haproxy.idle', 'line'],
+ 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percent', 'health', 'haproxy.idle', 'line'],
'lines': [
['idle', None, 'absolute']
- ]}
+ ]
+ }
}
-METRICS = {'bin': {'algorithm': 'incremental', 'divisor': 1024},
- 'bout': {'algorithm': 'incremental', 'divisor': 1024},
- 'scur': {'algorithm': 'absolute', 'divisor': 1},
- 'qcur': {'algorithm': 'absolute', 'divisor': 1},
- 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1},
- }
+METRICS = {
+ 'bin': {'algorithm': 'incremental', 'divisor': 1024},
+ 'bout': {'algorithm': 'incremental', 'divisor': 1024},
+ 'scur': {'algorithm': 'absolute', 'divisor': 1},
+ 'qcur': {'algorithm': 'absolute', 'divisor': 1},
+ 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
+}
BACKEND_METRICS = {
diff --git a/conf.d/python.d/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
index a40dd76a5..a40dd76a5 100644
--- a/conf.d/python.d/haproxy.conf
+++ b/collectors/python.d.plugin/haproxy/haproxy.conf
diff --git a/collectors/python.d.plugin/hddtemp/Makefile.inc b/collectors/python.d.plugin/hddtemp/Makefile.inc
new file mode 100644
index 000000000..22852b646
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += hddtemp/hddtemp.chart.py
+dist_pythonconfig_DATA += hddtemp/hddtemp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
+
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
new file mode 100644
index 000000000..1236186a5
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -0,0 +1,22 @@
+# hddtemp
+
+Module monitors disk temperatures from one or more hddtemp daemons.
+
+**Requirement:**
+`hddtemp` running in daemonized mode and listening on a TCP port
+
+It produces one chart, **Temperature**, with a dynamic number of dimensions (one per disk).
+
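+The daemon replies with one pipe-separated record per disk; an illustrative (not verbatim) reply:
+
+```
+|/dev/sda|SAMSUNG HD501LJ|30|C||/dev/sdb|ST3500320AS|UNK|*|
+```
+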
+### configuration
+
+Sample:
+
+```yaml
+update_every: 3
+host: "127.0.0.1"
+port: 7634
+```
+
+If no configuration is given, the module will attempt to connect to the hddtemp daemon at `127.0.0.1:7634`.
+
+---
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
new file mode 100644
index 000000000..dea701171
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Description: hddtemp netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+
+ORDER = ['temperatures']
+
+CHARTS = {
+ 'temperatures': {
+ 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'],
+ 'lines': [
+ # lines are created dynamically in `check()` method
+        ]
+    }
+}
+
+RE = re.compile(r'\/dev\/([^|]+)\|([^|]+)\|([0-9]+|SLP|UNK)\|')
+
+
+class Disk:
+ def __init__(self, id_, name, temp):
+ self.id = id_.split('/')[-1]
+ self.name = name.replace(' ', '_')
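+        # non-numeric temperatures (SLP, UNK) are reported as 0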
+ self.temp = temp if temp.isdigit() else 0
+
+ def __repr__(self):
+ return self.id
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self._keep_alive = False
+ self.request = ""
+ self.host = "127.0.0.1"
+ self.port = 7634
+ self.do_only = self.configuration.get('devices')
+
+ def get_disks(self):
+ r = self._get_raw_data()
+
+ if not r:
+ return None
+
+ m = RE.findall(r)
+
+ if not m:
+ self.error("received data doesn't have needed records")
+ return None
+
+ rv = [Disk(*d) for d in m]
+ self.debug('available disks: {0}'.format(rv))
+
+ if self.do_only:
+ return [v for v in rv if v.id in self.do_only]
+ return rv
+
+ def get_data(self):
+ """
+ Get data from TCP/IP socket
+ :return: dict
+ """
+
+ disks = self.get_disks()
+
+ if not disks:
+ return None
+
+ return dict((d.id, d.temp) for d in disks)
+
+ def check(self):
+ """
+ Parse configuration, check if hddtemp is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ self._parse_config()
+ disks = self.get_disks()
+
+ if not disks:
+ return False
+
+ for d in disks:
+ n = d.id if d.id.startswith('sd') else d.name
+ dim = [d.id, n]
+ self.definitions['temperatures']['lines'].append(dim)
+
+ return True
+
+ @staticmethod
+ def _check_raw_data(data):
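+        # hddtemp sends the whole reply at once and closes the socket,
+        # so an empty chunk means all data has been received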
+ return not bool(data)
diff --git a/conf.d/python.d/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf
index 9165798a2..9165798a2 100644
--- a/conf.d/python.d/hddtemp.conf
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf
diff --git a/collectors/python.d.plugin/httpcheck/Makefile.inc b/collectors/python.d.plugin/httpcheck/Makefile.inc
new file mode 100644
index 000000000..4a5bd856d
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += httpcheck/httpcheck.chart.py
+dist_pythonconfig_DATA += httpcheck/httpcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += httpcheck/README.md httpcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
new file mode 100644
index 000000000..759107663
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -0,0 +1,41 @@
+# httpcheck
+
+Module monitors a remote HTTP server for availability and response time.
+
+The following charts are drawn per job:
+
+1. **Response time** ms
+ * The time it takes the server to respond, at 0.1 ms resolution.
+   If the connection failed, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Unexpected content: No Regex match found in the response
+ * Unexpected status code: Do we get 500 errors?
+ * Connection failed: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+### configuration
+
+Sample configuration and its default values.
+
+```yaml
+server:
+ url: 'http://host:port/path' # required
+ status_accepted: # optional
+ - 200
+ timeout: 1 # optional, supports decimals (e.g. 0.2)
+ update_every: 3 # optional
+ regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html
+ redirect: yes # optional
+```
+
+### notes
+
+ * The status chart is primarily intended for alarms, badges or for access via API.
+ * A system/service/firewall might block netdata's access if a portscan or
+ similar is detected.
+ * This plugin is meant for simple use cases. Currently, the measured
+   response time has low accuracy and should be used as a reference only.
+
+---
diff --git a/python.d/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
index b0177ff90..f046f33c0 100644
--- a/python.d/httpcheck.chart.py
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: http check netdata python.d module
# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
import urllib3
import re
@@ -35,12 +36,14 @@ CHARTS = {
'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'],
'lines': [
[HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
- ]},
+ ]
+ },
'response_length': {
'options': [None, 'HTTP response body length', 'characters', 'response', 'httpcheck.responselength', 'line'],
'lines': [
[HTTP_RESPONSE_LENGTH, 'length', 'absolute']
- ]},
+ ]
+ },
'status': {
'options': [None, 'HTTP status', 'boolean', 'status', 'httpcheck.status', 'line'],
'lines': [
@@ -49,7 +52,8 @@ CHARTS = {
[HTTP_BAD_STATUS, 'bad status', 'absolute'],
[HTTP_TIMEOUT, 'timeout', 'absolute'],
[HTTP_NO_CONNECTION, 'no connection', 'absolute']
- ]}
+ ]
+ }
}
@@ -87,15 +91,15 @@ class Service(UrlService):
self.process_response(content, data, status)
except urllib3.exceptions.NewConnectionError as error:
- self.debug("Connection failed: {url}. Error: {error}".format(url=url, error=error))
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
data[HTTP_NO_CONNECTION] = 1
except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError) as error:
- self.debug("Connection timed out: {url}. Error: {error}".format(url=url, error=error))
+ self.debug('Connection timed out: {url}. Error: {error}'.format(url=url, error=error))
data[HTTP_TIMEOUT] = 1
except urllib3.exceptions.HTTPError as error:
- self.debug("Connection failed: {url}. Error: {error}".format(url=url, error=error))
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
data[HTTP_NO_CONNECTION] = 1
except (TypeError, AttributeError) as error:
@@ -109,7 +113,7 @@ class Service(UrlService):
self.debug('Content: \n\n{content}\n'.format(content=content))
if status in self.status_codes_accepted:
if self.regex and self.regex.search(content) is None:
- self.debug("No match for regex '{regex}' found".format(regex=self.regex.pattern))
+ self.debug('No match for regex "{regex}" found'.format(regex=self.regex.pattern))
data[HTTP_BAD_CONTENT] = 1
else:
data[HTTP_SUCCESS] = 1
diff --git a/conf.d/python.d/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
index 058e057a6..bd21b5af8 100644
--- a/conf.d/python.d/httpcheck.conf
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf
@@ -66,6 +66,7 @@ chart_cleanup: 0
# url: 'http[s]://host-ip-or-dns[:port][path]'
# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
# # for HTTP and 443 for HTTPS. [path] is optional too, defaults to /
+# method: GET # [optional] the HTTP request method (POST, PUT, DELETE, HEAD etc.)
# redirect: yes # [optional] If the remote host returns 3xx status codes, the redirection url will be
# # followed (default).
# status_accepted: # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the
diff --git a/collectors/python.d.plugin/icecast/Makefile.inc b/collectors/python.d.plugin/icecast/Makefile.inc
new file mode 100644
index 000000000..cb7c6fa0e
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += icecast/icecast.chart.py
+dist_pythonconfig_DATA += icecast/icecast.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += icecast/README.md icecast/Makefile.inc
+
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
new file mode 100644
index 000000000..a28a6c398
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -0,0 +1,26 @@
+# icecast
+
+This module monitors the number of listeners for active sources.
+
+**Requirements:**
+ * icecast version >= 2.4.0
+
+It produces the following charts:
+
+1. **Listeners** in listeners
+ * source number
+
+### configuration
+
+Needs only the `url` of the server's `/status-json.xsl`.
+
+Here is an example for a remote server:
+
+```yaml
+remote:
+ url : 'http://1.2.3.4:8443/status-json.xsl'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8443/status-json.xsl`.
+
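+The module reads the `icestats.source` entry of the JSON reply (a single source or a list of sources); a trimmed, illustrative example:
+
+```json
+{"icestats": {"source": [{"server_name": "stream1", "listeners": 3}]}}
+```
+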
+---
diff --git a/python.d/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
index 792b99f3f..d8813f9ba 100644
--- a/python.d/icecast.chart.py
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: icecast netdata python.d module
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
@@ -15,10 +16,10 @@ ORDER = ['listeners']
CHARTS = {
'listeners': {
- 'options': [None, 'Number Of Listeners', 'listeners',
- 'listeners', 'icecast.listeners', 'line'],
+ 'options': [None, 'Number Of Listeners', 'listeners', 'listeners', 'icecast.listeners', 'line'],
'lines': [
- ]}
+ ]
+ }
}
@@ -86,7 +87,11 @@ class Service(UrlService):
try:
data = json.loads(raw_data)
except ValueError as error:
- self.error("JSON decode error:", error)
+ self.error('JSON decode error:', error)
return None
- return data['icestats'].get('source')
+ sources = data['icestats'].get('source')
+ if not sources:
+ return None
+
+ return sources if isinstance(sources, list) else [sources]
diff --git a/conf.d/python.d/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf
index a900d06d3..a900d06d3 100644
--- a/conf.d/python.d/icecast.conf
+++ b/collectors/python.d.plugin/icecast/icecast.conf
diff --git a/collectors/python.d.plugin/ipfs/Makefile.inc b/collectors/python.d.plugin/ipfs/Makefile.inc
new file mode 100644
index 000000000..68458cb38
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ipfs/ipfs.chart.py
+dist_pythonconfig_DATA += ipfs/ipfs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ipfs/README.md ipfs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
new file mode 100644
index 000000000..a30649a5f
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -0,0 +1,25 @@
+# ipfs
+
+Module monitors basic [IPFS](https://ipfs.io) information.
+
+1. **Bandwidth** in kbits/s
+ * in
+ * out
+
+2. **Peers**
+ * peers
+
+### configuration
+
+Only the URL of the IPFS server is needed.
+
+Sample:
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://localhost:5001'
+```
+
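+The module also accepts a `pinapi` switch to poll pinned objects; it defaults to off because of a go-ipfs bug ([#3874](https://github.com/ipfs/go-ipfs/issues/3874)) that causes very high CPU usage. For example:
+
+```yaml
+localhost:
+  name : 'local'
+  url : 'http://localhost:5001'
+  pinapi : no
+```
+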
+---
+
diff --git a/python.d/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
index 43500dfb5..3f6794e48 100644
--- a/python.d/ipfs.chart.py
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: IPFS netdata python.d module
-# Authors: Pawel Krupa (paulfantom), davidak
+# Authors: davidak
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
@@ -26,31 +27,43 @@ CHARTS = {
'bandwidth': {
'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
'lines': [
- ["in", None, "absolute", 8, 1000],
- ["out", None, "absolute", -8, 1000]
- ]},
+ ['in', None, 'absolute', 8, 1000],
+ ['out', None, 'absolute', -8, 1000]
+ ]
+ },
'peers': {
'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
'lines': [
- ["peers", None, 'absolute']
- ]},
+ ['peers', None, 'absolute']
+ ]
+ },
'repo_size': {
'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
'lines': [
- ["avail", None, "absolute", 1, 1e9],
- ["size", None, "absolute", 1, 1e9],
- ]},
+ ['avail', None, 'absolute', 1, 1e9],
+ ['size', None, 'absolute', 1, 1e9],
+ ]
+ },
'repo_objects': {
'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
'lines': [
- ["objects", None, "absolute", 1, 1],
- ["pinned", None, "absolute", 1, 1],
- ["recursive_pins", None, "absolute", 1, 1]
- ]},
+ ['objects', None, 'absolute', 1, 1],
+ ['pinned', None, 'absolute', 1, 1],
+ ['recursive_pins', None, 'absolute', 1, 1]
+ ]
+ }
}
-SI_zeroes = {'k': 3, 'm': 6, 'g': 9, 't': 12,
- 'p': 15, 'e': 18, 'z': 21, 'y': 24}
+SI_zeroes = {
+ 'k': 3,
+ 'm': 6,
+ 'g': 9,
+ 't': 12,
+ 'p': 15,
+ 'e': 18,
+ 'z': 21,
+ 'y': 24
+}
class Service(UrlService):
@@ -60,6 +73,7 @@ class Service(UrlService):
self.order = ORDER
self.definitions = CHARTS
self.__storage_max = None
+ self.do_pinapi = self.configuration.get('pinapi')
def _get_json(self, sub_url):
"""
@@ -73,7 +87,7 @@ class Service(UrlService):
@staticmethod
def _recursive_pins(keys):
- return len([k for k in keys if keys[k]["Type"] == b"recursive"])
+        return sum(1 for k in keys if keys[k]['Type'] == 'recursive')
@staticmethod
def _dehumanize(store_max):
@@ -93,7 +107,7 @@ class Service(UrlService):
def _storagemax(self, store_cfg):
if self.__storage_max is None:
- self.__storage_max = self._dehumanize(store_cfg['StorageMax'])
+ self.__storage_max = self._dehumanize(store_cfg)
return self.__storage_max
def _get_data(self):
@@ -106,13 +120,15 @@ class Service(UrlService):
'/api/v0/stats/bw':
[('in', 'RateIn', int), ('out', 'RateOut', int)],
'/api/v0/swarm/peers':
- [('peers', 'Strings', len)],
+ [('peers', 'Peers', len)],
'/api/v0/stats/repo':
- [('size', 'RepoSize', int), ('objects', 'NumObjects', int)],
- '/api/v0/pin/ls':
- [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)],
- '/api/v0/config/show': [('avail', 'Datastore', self._storagemax)]
+ [('size', 'RepoSize', int), ('objects', 'NumObjects', int), ('avail', 'StorageMax', self._storagemax)],
}
+ if self.do_pinapi:
+ cfg.update({
+ '/api/v0/pin/ls':
+ [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)]
+ })
r = dict()
for suburl in cfg:
in_json = self._get_json(suburl)
diff --git a/conf.d/python.d/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
index c247c1b7a..e3df0f6bb 100644
--- a/conf.d/python.d/ipfs.conf
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -64,11 +64,16 @@
# Additionally to the above, ipfs also supports the following:
#
# url: 'URL' # URL to the IPFS API
+# pinapi: no # Set status of IPFS pinned object polling
+# # Currently defaults to disabled due to IPFS Bug
+# # https://github.com/ipfs/go-ipfs/issues/3874
+# # resulting in very high CPU Usage
#
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)
localhost:
- name : 'local'
- url : 'http://localhost:5001'
+ name : 'local'
+ url : 'http://localhost:5001'
+ pinapi : no
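+
+# To poll pinned objects anyway (accepting the CPU cost of the bug noted
+# above), set pinapi explicitly in a job, e.g.:
+#
+# remote:
+#   name   : 'remote'
+#   url    : 'http://203.0.113.10:5001'   # hypothetical remote IPFS API
+#   pinapi : yes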
diff --git a/collectors/python.d.plugin/isc_dhcpd/Makefile.inc b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
new file mode 100644
index 000000000..44343fc9d
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += isc_dhcpd/isc_dhcpd.chart.py
+dist_pythonconfig_DATA += isc_dhcpd/isc_dhcpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += isc_dhcpd/README.md isc_dhcpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
new file mode 100644
index 000000000..334d86e33
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -0,0 +1,34 @@
+# isc_dhcpd
+
+This module monitors the dhcpd leases database and shows all active leases for the given pools.
+
+**Requirements:**
+ * dhcpd leases file MUST BE readable by netdata
+ * pools MUST BE in CIDR format
+
+It produces:
+
+1. **Pools utilization** Aggregate chart for all pools.
+ * utilization in percent
+
+2. **Total leases**
+ * leases (overall number of leases for all pools)
+
+3. **Active leases** for every pool
+ * leases (number of active leases in pool)
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ leases_path : '/var/lib/dhcp/dhcpd.leases'
+ pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
+```
+
+If you are running the module under Python 2, you need to install the `py2-ipaddress` package for it to work.
+The module will not work if no configuration is given.
+
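+For reference, a minimal sketch (not the module's exact code) of how pool
+utilization can be derived from a CIDR pool spec using the standard Python 3
+`ipaddress` module; the lease count is hypothetical:
+
+```python
+import ipaddress
+
+pool = ipaddress.ip_network('192.168.3.0/24')
+active_leases = 117  # hypothetical count parsed from dhcpd.leases
+utilization = active_leases / float(pool.num_addresses) * 100
+print('{0:.2f}%'.format(utilization))  # -> 45.70%
+```
+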
+---
diff --git a/python.d/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
index eb6338452..a9f274949 100644
--- a/python.d/isc_dhcpd.chart.py
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: isc dhcpd lease netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
import re
@@ -25,17 +26,18 @@ ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total']
CHARTS = {
'pools_utilization': {
- 'options': [None, 'Pools Utilization', '%', 'utilization',
- 'isc_dhcpd.utilization', 'line'],
- 'lines': []},
+ 'options': [None, 'Pools Utilization', '%', 'utilization', 'isc_dhcpd.utilization', 'line'],
+ 'lines': []
+ },
'pools_active_leases': {
- 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases',
- 'isc_dhcpd.active_leases', 'line'],
- 'lines': []},
+ 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases', 'isc_dhcpd.active_leases', 'line'],
+ 'lines': []
+ },
'leases_total': {
- 'options': [None, 'All Active Leases', 'leases', 'active leases',
- 'isc_dhcpd.leases_total', 'line'],
- 'lines': [['leases_total', 'leases', 'absolute']],
+ 'options': [None, 'All Active Leases', 'leases', 'active leases', 'isc_dhcpd.leases_total', 'line'],
+ 'lines': [
+ ['leases_total', 'leases', 'absolute']
+ ],
'variables': [
['leases_size']
]
diff --git a/conf.d/python.d/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
index 4a4c4a5e3..4a4c4a5e3 100644
--- a/conf.d/python.d/isc_dhcpd.conf
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
diff --git a/collectors/python.d.plugin/linux_power_supply/Makefile.inc b/collectors/python.d.plugin/linux_power_supply/Makefile.inc
new file mode 100644
index 000000000..1864ba524
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += linux_power_supply/linux_power_supply.chart.py
+dist_pythonconfig_DATA += linux_power_supply/linux_power_supply.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += linux_power_supply/README.md linux_power_supply/Makefile.inc
+
diff --git a/collectors/python.d.plugin/linux_power_supply/README.md b/collectors/python.d.plugin/linux_power_supply/README.md
new file mode 100644
index 000000000..5cfbe41ce
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/README.md
@@ -0,0 +1,67 @@
+# linux\_power\_supply
+
+This module monitors various metrics reported by power supply drivers
+on Linux. This allows tracking and alerting on things like remaining
+battery capacity.
+
+Depending on the underlying driver, it may provide the following charts
+and metrics:
+
+1. Capacity: The power supply capacity expressed as a percentage.
+ * capacity\_now
+
+2. Charge: The charge for the power supply, expressed in microampere-hours.
+ * charge\_full\_design
+ * charge\_full
+ * charge\_now
+ * charge\_empty
+ * charge\_empty\_design
+
+3. Energy: The energy for the power supply, expressed in microwatt-hours.
+ * energy\_full\_design
+ * energy\_full
+ * energy\_now
+ * energy\_empty
+ * energy\_empty\_design
+
+4. Voltage: The voltage for the power supply, expressed in microvolts.
+ * voltage\_max\_design
+ * voltage\_max
+ * voltage\_now
+ * voltage\_min
+ * voltage\_min\_design
+
+### configuration
+
+Sample:
+
+```yaml
+battery:
+ supply: 'BAT0'
+ charts: 'capacity charge energy voltage'
+```
+
+The `supply` key specifies the name of the power supply device to monitor.
+You can use `ls /sys/class/power_supply` to get a list of such devices
+on your system.
+
+The `charts` key is a space separated list of which charts to try
+to display. It defaults to trying to display everything.
+
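+As a rough sketch of what the module does under the hood (assuming a
+battery named `BAT0` that exposes a `capacity` attribute), each metric is
+a single integer read from sysfs:
+
+```python
+with open('/sys/class/power_supply/BAT0/capacity') as f:
+    print(int(f.read()))  # e.g. 87 (percent)
+```
+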
+### notes
+
+* Most drivers provide at least the first chart. Battery powered ACPI
+compliant systems (like most laptops) provide all but the third, but do
+not provide all of the metrics for each chart.
+
+* Charge, energy, and voltage are reported with a _very_ high precision
+by the power\_supply framework. Usually, this is far higher than the
+actual hardware supports reporting, so expect the values in these
+charts to change in jumps rather than scale smoothly.
+
+* If a `max` or `full` attribute is defined by the driver but a
+corresponding `min` or `empty` attribute is not, netdata will still
+provide the corresponding `min` or `empty` dimension, which will then
+always read as zero. This way, alerts which match on these will still work.
+
+---
diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py
new file mode 100644
index 000000000..71d834e5d
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Description: Linux power_supply netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+
+import os
+import platform
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# Everything except percentages is reported as µ units.
+PRECISION = 10 ** 6
+
+# A priority of 90000 places us next to the other PSU related stuff.
+PRIORITY = 90000
+
+# We add our charts dynamically when we probe for the device attributes,
+# so these are empty by default.
+ORDER = []
+
+CHARTS = {}
+
+
+def get_capacity_chart(syspath):
+ # Capacity is measured in percent. We track one value.
+ options = [None, 'Capacity', '%', 'power_supply', 'power_supply.capacity', 'line']
+ lines = list()
+ attr_now = 'capacity'
+ if get_sysfs_value(os.path.join(syspath, attr_now)) is not None:
+ lines.append([attr_now, attr_now, 'absolute', 1, 1])
+ return {'capacity': {'options': options, 'lines': lines}}, [attr_now]
+ else:
+ return None, None
+
+
+def get_generic_chart(syspath, name, unit, maxname, minname):
+ # Used to generate charts for energy, charge, and voltage.
+ options = [None, name.title(), unit, 'power_supply', 'power_supply.{0}'.format(name), 'line']
+ lines = list()
+ attrlist = list()
+ attr_max_design = '{0}_{1}_design'.format(name, maxname)
+ attr_max = '{0}_{1}'.format(name, maxname)
+ attr_now = '{0}_now'.format(name)
+ attr_min = '{0}_{1}'.format(name, minname)
+ attr_min_design = '{0}_{1}_design'.format(name, minname)
+ if get_sysfs_value(os.path.join(syspath, attr_now)) is not None:
+ lines.append([attr_now, attr_now, 'absolute', 1, PRECISION])
+ attrlist.append(attr_now)
+ else:
+ return None, None
+ if get_sysfs_value(os.path.join(syspath, attr_max)) is not None:
+ lines.insert(0, [attr_max, attr_max, 'absolute', 1, PRECISION])
+ lines.append([attr_min, attr_min, 'absolute', 1, PRECISION])
+ attrlist.append(attr_max)
+ attrlist.append(attr_min)
+ elif get_sysfs_value(os.path.join(syspath, attr_min)) is not None:
+ lines.append([attr_min, attr_min, 'absolute', 1, PRECISION])
+ attrlist.append(attr_min)
+ if get_sysfs_value(os.path.join(syspath, attr_max_design)) is not None:
+ lines.insert(0, [attr_max_design, attr_max_design, 'absolute', 1, PRECISION])
+ lines.append([attr_min_design, attr_min_design, 'absolute', 1, PRECISION])
+ attrlist.append(attr_max_design)
+ attrlist.append(attr_min_design)
+ elif get_sysfs_value(os.path.join(syspath, attr_min_design)) is not None:
+ lines.append([attr_min_design, attr_min_design, 'absolute', 1, PRECISION])
+ attrlist.append(attr_min_design)
+ return {name: {'options': options, 'lines': lines}}, attrlist
+
+
+def get_charge_chart(syspath):
+ # Charge is measured in microamphours. We track up to five
+ # attributes.
+    # Charge is measured in microampere-hours. We track up to five
+
+
+def get_energy_chart(syspath):
+    # Energy is measured in microwatt-hours. We track up to five
+ # attributes.
+ return get_generic_chart(syspath, 'energy', 'µWh', 'full', 'empty')
+
+
+def get_voltage_chart(syspath):
+ # Voltage is measured in microvolts. We track up to five attributes.
+ return get_generic_chart(syspath, 'voltage', 'µV', 'min', 'max')
+
+
+# This is a list of functions for generating charts. Used below to save
+# a bit of code (and to make it a bit easier to add new charts).
+GET_CHART = {
+ 'capacity': get_capacity_chart,
+ 'charge': get_charge_chart,
+ 'energy': get_energy_chart,
+ 'voltage': get_voltage_chart
+}
+
+
+# This opens the specified file and returns the value in it or None if
+# the file doesn't exist.
+def get_sysfs_value(filepath):
+ try:
+ with open(filepath, 'r') as datasource:
+ return int(datasource.read())
+ except (OSError, IOError):
+ return None
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.definitions = dict()
+ self.order = list()
+ self.attrlist = list()
+ self.supply = self.configuration.get('supply', None)
+ if self.supply is not None:
+ self.syspath = '/sys/class/power_supply/{0}'.format(self.supply)
+ self.types = self.configuration.get('charts', 'capacity').split()
+
+ def check(self):
+ if platform.system() != 'Linux':
+ self.error('Only supported on Linux.')
+ return False
+ if self.supply is None:
+ self.error('No power supply specified for monitoring.')
+ return False
+ if not self.types:
+ self.error('No attributes requested for monitoring.')
+ return False
+ if not os.access(self.syspath, os.R_OK):
+ self.error('Unable to access {0}'.format(self.syspath))
+ return False
+ return self.create_charts()
+
+ def create_charts(self):
+ chartset = set(GET_CHART).intersection(set(self.types))
+ if not chartset:
+ self.error('No valid attributes requested for monitoring.')
+ return False
+ charts = dict()
+ attrlist = list()
+ for item in chartset:
+ chart, attrs = GET_CHART[item](self.syspath)
+ if chart is not None:
+ charts.update(chart)
+ attrlist.extend(attrs)
+ if len(charts) == 0:
+ self.error('No charts can be created.')
+ return False
+ self.definitions.update(charts)
+ self.order.extend(sorted(charts))
+ self.attrlist.extend(attrlist)
+ return True
+
+ def _get_data(self):
+ data = dict()
+ for attr in self.attrlist:
+ attrpath = os.path.join(self.syspath, attr)
+ if attr.endswith(('_min', '_min_design', '_empty', '_empty_design')):
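+                # absent min/empty attributes read as 0 so alarms matching them still work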
+ data[attr] = get_sysfs_value(attrpath) or 0
+ else:
+ data[attr] = get_sysfs_value(attrpath)
+ return data
diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
new file mode 100644
index 000000000..3cb610f7f
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for linux_power_supply
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above parameters, linux_power_supply also supports
+# the following extra parameters.
+#
+# supply: '' # the name of the power supply to monitor
+# charts: 'capacity' # a space separated list of the charts to try
+#                    # to generate. Valid charts are 'capacity',
+#                    # 'charge', 'energy', and 'voltage'.
+#
+# Note that linux_power_supply will not automatically detect power
+# supplies in the system; you have to manually specify which ones you
+# want it to monitor.
+#
+# The following config will work to monitor the first battery in most
+# ACPI compliant battery powered systems (such as most laptops).
+#
+# battery:
+# name: battery
+# supply: BAT0
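+#
+# To also track charge and voltage on such a system (assuming the driver
+# exposes those attributes), extend the same job:
+#
+# battery:
+#   name: battery
+#   supply: BAT0
+#   charts: 'capacity charge voltage'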
diff --git a/collectors/python.d.plugin/litespeed/Makefile.inc b/collectors/python.d.plugin/litespeed/Makefile.inc
new file mode 100644
index 000000000..5dd645020
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += litespeed/litespeed.chart.py
+dist_pythonconfig_DATA += litespeed/litespeed.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += litespeed/README.md litespeed/Makefile.inc
+
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
new file mode 100644
index 000000000..d1482f33c
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -0,0 +1,47 @@
+# litespeed
+
+This module monitors LiteSpeed web server performance metrics.
+
+It produces:
+
+1. **Network Throughput HTTP** in kilobits/s
+ * in
+ * out
+
+2. **Network Throughput HTTPS** in kilobits/s
+ * in
+ * out
+
+3. **Connections HTTP** in connections
+ * free
+ * used
+
+4. **Connections HTTPS** in connections
+ * free
+ * used
+
+5. **Requests** in requests/s
+ * requests
+
+6. **Requests In Processing** in requests
+ * processing
+
+7. **Public Cache Hits** in hits/s
+ * hits
+
+8. **Private Cache Hits** in hits/s
+ * hits
+
+9. **Static Hits** in hits/s
+ * hits
+
+
+### configuration
+```yaml
+local:
+ path : 'PATH'
+```
+
+If no configuration is given, the module will use `/tmp/lshttpd/`.
+
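+Internally the module scans that directory for `.rtreport*` files; a
+simplified sketch of the discovery step (mirroring `check()` in the
+module code):
+
+```python
+import glob
+import os
+
+path = '/tmp/lshttpd/'  # the default, see above
+print(glob.glob(os.path.join(path, '.rtreport*')))
+```
+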
+---
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
new file mode 100644
index 000000000..efdc6869c
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -0,0 +1,186 @@
+# -*- coding: utf-8 -*-
+# Description: litespeed netdata python.d module
+# Author: Ilya Maschenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import glob
+import re
+import os
+
+from collections import namedtuple
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+update_every = 10
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'net_throughput_http', 'net_throughput_https', # net throughput
+ 'connections_http', 'connections_https', # connections
+ 'requests', 'requests_processing', # requests
+ 'pub_cache_hits', 'private_cache_hits', # cache
+ 'static_hits' # static
+]
+
+CHARTS = {
+ 'net_throughput_http': {
+ 'options': [None, 'Network Throughput HTTP', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['bps_in', 'in', 'absolute'],
+ ['bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'net_throughput_https': {
+ 'options': [None, 'Network Throughput HTTPS', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['ssl_bps_in', 'in', 'absolute'],
+ ['ssl_bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'connections_http': {
+ 'options': [None, 'Connections HTTP', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['conn_free', 'free', 'absolute'],
+ ['conn_used', 'used', 'absolute']
+ ]
+ },
+ 'connections_https': {
+ 'options': [None, 'Connections HTTPS', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['ssl_conn_free', 'free', 'absolute'],
+ ['ssl_conn_used', 'used', 'absolute']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'litespeed.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'absolute', 1, 100]
+ ]
+ },
+ 'requests_processing': {
+ 'options': [None, 'Requests In Processing', 'requests', 'requests', 'litespeed.requests_processing', 'line'],
+ 'lines': [
+ ['requests_processing', 'processing', 'absolute']
+ ]
+ },
+ 'pub_cache_hits': {
+ 'options': [None, 'Public Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['pub_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'private_cache_hits': {
+ 'options': [None, 'Private Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['private_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'static_hits': {
+ 'options': [None, 'Static Hits', 'hits/s', 'static', 'litespeed.static', 'line'],
+ 'lines': [
+ ['static_hits', 'hits', 'absolute', 1, 100]
+ ]
+ }
+}
+
+t = namedtuple('T', ['key', 'id', 'mul'])
+
+T = [
+ t('BPS_IN', 'bps_in', 8),
+ t('BPS_OUT', 'bps_out', 8),
+ t('SSL_BPS_IN', 'ssl_bps_in', 8),
+ t('SSL_BPS_OUT', 'ssl_bps_out', 8),
+ t('REQ_PER_SEC', 'requests', 100),
+ t('REQ_PROCESSING', 'requests_processing', 1),
+ t('PUB_CACHE_HITS_PER_SEC', 'pub_cache_hits', 100),
+ t('PRIVATE_CACHE_HITS_PER_SEC', 'private_cache_hits', 100),
+ t('STATIC_HITS_PER_SEC', 'static_hits', 100),
+ t('PLAINCONN', 'conn_used', 1),
+ t('AVAILCONN', 'conn_free', 1),
+ t('SSLCONN', 'ssl_conn_used', 1),
+ t('AVAILSSL', 'ssl_conn_free', 1),
+]
+
+RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
+
+ZERO_DATA = {
+ 'bps_in': 0,
+ 'bps_out': 0,
+ 'ssl_bps_in': 0,
+ 'ssl_bps_out': 0,
+ 'requests': 0,
+ 'requests_processing': 0,
+ 'pub_cache_hits': 0,
+ 'private_cache_hits': 0,
+ 'static_hits': 0,
+ 'conn_used': 0,
+ 'conn_free': 0,
+ 'ssl_conn_used': 0,
+ 'ssl_conn_free': 0,
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.path = self.configuration.get('path', '/tmp/lshttpd/')
+ self.files = list()
+
+ def check(self):
+ if not self.path:
+ self.error('"path" not specified')
+ return False
+
+ fs = glob.glob(os.path.join(self.path, '.rtreport*'))
+
+ if not fs:
+ self.error('"{0}" has no "rtreport" files or dir is not readable'.format(self.path))
+ return None
+
+ self.debug('stats files:', fs)
+
+ for f in fs:
+ if not is_readable_file(f):
+ self.error('{0} is not readable'.format(f))
+ continue
+ self.files.append(f)
+
+ return bool(self.files)
+
+ def get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = dict(ZERO_DATA)
+
+ for f in self.files:
+ try:
+ with open(f) as b:
+ lines = b.readlines()
+ except (OSError, IOError) as err:
+ self.error(err)
+ return None
+ else:
+ parse_file(data, lines)
+
+ return data
+
+
+def parse_file(data, lines):
+ for line in lines:
+ if not line.startswith(('BPS_IN:', 'MAXCONN:', 'REQ_RATE []:')):
+ continue
+ m = dict(RE.findall(line))
+ for v in T:
+ if v.key in m:
+ data[v.id] += float(m[v.key]) * v.mul
+
+
+def is_readable_file(v):
+ return os.path.isfile(v) and os.access(v, os.R_OK)
diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf
new file mode 100644
index 000000000..17d0f690e
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for litespeed
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, litespeed also supports the following:
+#
+# path: 'PATH' # path to the litespeed stats files directory
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ path : '/tmp/lshttpd/'
diff --git a/collectors/python.d.plugin/logind/Makefile.inc b/collectors/python.d.plugin/logind/Makefile.inc
new file mode 100644
index 000000000..adadab120
--- /dev/null
+++ b/collectors/python.d.plugin/logind/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += logind/logind.chart.py
+dist_pythonconfig_DATA += logind/logind.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += logind/README.md logind/Makefile.inc
+
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
new file mode 100644
index 000000000..8f8670d4a
--- /dev/null
+++ b/collectors/python.d.plugin/logind/README.md
@@ -0,0 +1,54 @@
+# logind
+
+This module monitors active sessions, users, and seats tracked by systemd-logind or elogind.
+
+It provides the following charts:
+
+1. **Sessions** Tracks the total number of sessions.
+ * Graphical: Local graphical sessions (running X11, or Wayland, or something else).
+ * Console: Local console sessions.
+ * Remote: Remote sessions.
+
+2. **Users** Tracks the total number of unique user logins of each type.
+ * Graphical
+ * Console
+ * Remote
+
+3. **Seats** Total number of seats in use.
+ * Seats
+
+### configuration
+
+This module needs no configuration. Just make sure the netdata user
+can run the `loginctl` command and get a session list without having to
+specify a path.
+
+This will work with any command that can output data in the _exact_
+same format as `loginctl list-sessions --no-legend`. If you have some
+other command you want to use that outputs data in this format, you can
+specify it using the `command` key like so:
+
+```yaml
+command: '/path/to/other/command'
+```
+
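+Internally, sessions are classified by the number of whitespace-separated
+fields on each output line (see `_get_data` in the module code). A minimal
+sketch of that rule, using a hypothetical output line:
+
+```python
+# 3 fields -> remote, 4 -> graphical (has a seat), 5 -> console (seat + tty)
+line = 'c2 1000 alice seat0 tty2'  # hypothetical loginctl output line
+kind = {3: 'remote', 4: 'graphical', 5: 'console'}.get(len(line.split()))
+print(kind)  # -> console
+```
+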
+### notes
+
+* This module's ability to track logins is dependent on what PAM services
+are configured to register sessions with logind. In particular, for
+most systems, it will only track TTY logins, local desktop logins,
+and logins through remote shell connections.
+
+* The users chart counts _usernames_, not UIDs. This is potentially
+important in configurations where multiple users have the same UID.
+
+* The users chart counts any given user name up to once for _each_ type
+of login. So if the same user has a graphical and a console login on a
+system, they will show up once in the graphical count, and once in the
+console count.
+
+* Because the data collection process is rather expensive, this plugin
+is currently disabled by default, and needs to be explicitly enabled in
+`/etc/netdata/python.d.conf` before it will run.
+
+---
diff --git a/collectors/python.d.plugin/logind/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py
new file mode 100644
index 000000000..bfc486c7f
--- /dev/null
+++ b/collectors/python.d.plugin/logind/logind.chart.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+# Description: logind netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+priority = 59999
+disabled_by_default = True
+
+ORDER = ['sessions', 'users', 'seats']
+
+CHARTS = {
+ 'sessions': {
+ 'options': [None, 'Logind Sessions', 'sessions', 'sessions', 'logind.sessions', 'stacked'],
+ 'lines': [
+ ['sessions_graphical', 'Graphical', 'absolute', 1, 1],
+ ['sessions_console', 'Console', 'absolute', 1, 1],
+ ['sessions_remote', 'Remote', 'absolute', 1, 1]
+ ]
+ },
+ 'users': {
+ 'options': [None, 'Logind Users', 'users', 'users', 'logind.users', 'stacked'],
+ 'lines': [
+ ['users_graphical', 'Graphical', 'absolute', 1, 1],
+ ['users_console', 'Console', 'absolute', 1, 1],
+ ['users_remote', 'Remote', 'absolute', 1, 1]
+ ]
+ },
+ 'seats': {
+ 'options': [None, 'Logind Seats', 'seats', 'seats', 'logind.seats', 'line'],
+ 'lines': [
+ ['seats', 'Active Seats', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.command = 'loginctl list-sessions --no-legend'
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ ret = {
+ 'sessions_graphical': 0,
+ 'sessions_console': 0,
+ 'sessions_remote': 0,
+ }
+ users = {
+ 'graphical': list(),
+ 'console': list(),
+ 'remote': list()
+ }
+ seats = list()
+ data = self._get_raw_data()
+
+ for item in data:
+ fields = item.split()
+ if len(fields) == 3:
+ users['remote'].append(fields[2])
+ ret['sessions_remote'] += 1
+ elif len(fields) == 4:
+ users['graphical'].append(fields[2])
+ ret['sessions_graphical'] += 1
+ seats.append(fields[3])
+ elif len(fields) == 5:
+ users['console'].append(fields[2])
+ ret['sessions_console'] += 1
+ seats.append(fields[3])
+
+ ret['users_graphical'] = len(set(users['graphical']))
+ ret['users_console'] = len(set(users['console']))
+ ret['users_remote'] = len(set(users['remote']))
+ ret['seats'] = len(set(seats))
+
+ return ret
diff --git a/collectors/python.d.plugin/logind/logind.conf b/collectors/python.d.plugin/logind/logind.conf
new file mode 100644
index 000000000..0623493de
--- /dev/null
+++ b/collectors/python.d.plugin/logind/logind.conf
@@ -0,0 +1,62 @@
+# netdata python.d.plugin configuration for logind
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
diff --git a/collectors/python.d.plugin/mdstat/Makefile.inc b/collectors/python.d.plugin/mdstat/Makefile.inc
new file mode 100644
index 000000000..5125a271b
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mdstat/mdstat.chart.py
+dist_pythonconfig_DATA += mdstat/mdstat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mdstat/README.md mdstat/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mdstat/README.md b/collectors/python.d.plugin/mdstat/README.md
new file mode 100644
index 000000000..1ff8f7dab
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/README.md
@@ -0,0 +1,26 @@
+# mdstat
+
+This module monitors `/proc/mdstat`.
+
+It produces:
+
+1. **Health** Number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+ * total (number of devices the array ideally would have)
+ * inuse (number of devices currently in use)
+
+3. **Current status**
+ * resync in percent
+ * recovery in percent
+ * reshape in percent
+ * check in percent
+
+4. **Operation status** (if resync/recovery/reshape/check is active)
+ * finish in minutes
+ * speed in megabytes/s
+
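+The parsing is regex-based. A minimal sketch of the disk-count match,
+against a hypothetical flattened `/proc/mdstat` excerpt (the module uses
+the same pattern, shown as `RE_DISKS` in the code):
+
+```python
+import re
+
+line = ' md0 : active raid1 sdb1[1] sda1[0] 1048512 blocks [2/2] [UU]'
+m = re.search(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
+              r'(?P<total_disks>[0-9]+)/(?P<inuse_disks>[0-9]+)\]', line)
+print(m.groupdict())  # -> {'array': 'md0', 'total_disks': '2', 'inuse_disks': '2'}
+```
+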
+### configuration
+No configuration is needed.
+
+---
diff --git a/collectors/python.d.plugin/mdstat/mdstat.chart.py b/collectors/python.d.plugin/mdstat/mdstat.chart.py
new file mode 100644
index 000000000..b7306b6a7
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/mdstat.chart.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# Description: mdstat netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from collections import defaultdict
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+MDSTAT = '/proc/mdstat'
+MISMATCH_CNT = '/sys/block/{0}/md/mismatch_cnt'
+
+ORDER = ['mdstat_health']
+
+CHARTS = {
+ 'mdstat_health': {
+ 'options': [None, 'Faulty Devices In MD', 'failed disks', 'health', 'md.health', 'line'],
+ 'lines': []
+ }
+}
+
+RE_DISKS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
+ r'(?P<total_disks>[0-9]+)/'
+ r'(?P<inuse_disks>[0-9]+)\]')
+
+RE_STATUS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
+ r'(?P<operation>[a-z]+) =[ ]{1,2}'
+ r'(?P<operation_status>[0-9.]+).+finish='
+ r'(?P<finish_in>([0-9.]+))min speed='
+ r'(?P<speed>[0-9]+)')
+
+
+def md_charts(name):
+ order = [
+ '{0}_disks'.format(name),
+ '{0}_operation'.format(name),
+ '{0}_mismatch_cnt'.format(name),
+ '{0}_finish'.format(name),
+ '{0}_speed'.format(name)
+ ]
+
+ charts = dict()
+ charts[order[0]] = {
+ 'options': [None, 'Disks Stats', 'disks', name, 'md.disks', 'stacked'],
+ 'lines': [
+ ['{0}_total_disks'.format(name), 'total', 'absolute'],
+ ['{0}_inuse_disks'.format(name), 'inuse', 'absolute']
+ ]
+ }
+
+ charts[order[1]] = {
+ 'options': [None, 'Current Status', 'percent', name, 'md.status', 'line'],
+ 'lines': [
+ ['{0}_resync'.format(name), 'resync', 'absolute', 1, 100],
+ ['{0}_recovery'.format(name), 'recovery', 'absolute', 1, 100],
+ ['{0}_reshape'.format(name), 'reshape', 'absolute', 1, 100],
+ ['{0}_check'.format(name), 'check', 'absolute', 1, 100],
+ ]
+ }
+
+ charts[order[2]] = {
+ 'options': [None, 'Mismatch Count', 'unsynchronized blocks', name, 'md.mismatch_cnt', 'line'],
+ 'lines': [
+ ['{0}_mismatch_cnt'.format(name), 'count', 'absolute']
+ ]
+ }
+
+ charts[order[3]] = {
+ 'options': [None, 'Approximate Time Until Finish', 'seconds', name, 'md.rate', 'line'],
+ 'lines': [
+ ['{0}_finish_in'.format(name), 'finish in', 'absolute', 1, 1000]
+ ]
+ }
+
+ charts[order[4]] = {
+ 'options': [None, 'Operation Speed', 'KB/s', name, 'md.rate', 'line'],
+ 'lines': [
+ ['{0}_speed'.format(name), 'speed', 'absolute', 1, 1000]
+ ]
+ }
+
+ return order, charts
+
+
+class MD:
+ def __init__(self, raw_data):
+ self.name = raw_data['array']
+ self.d = raw_data
+
+ def data(self):
+ rv = {
+ 'total_disks': self.d['total_disks'],
+ 'inuse_disks': self.d['inuse_disks'],
+ 'health': int(self.d['total_disks']) - int(self.d['inuse_disks']),
+ 'resync': 0,
+ 'recovery': 0,
+ 'reshape': 0,
+ 'check': 0,
+ 'finish_in': 0,
+ 'speed': 0,
+ }
+
+ v = read_lines(MISMATCH_CNT.format(self.name))
+ if v:
+ rv['mismatch_cnt'] = v
+
+ if self.d.get('operation'):
+ rv[self.d['operation']] = float(self.d['operation_status']) * 100
+ rv['finish_in'] = float(self.d['finish_in']) * 1000 * 60
+ rv['speed'] = float(self.d['speed']) * 1000
+
+ return dict(('{0}_{1}'.format(self.name, k), v) for k, v in rv.items())
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.mds = list()
+
+ @staticmethod
+ def get_mds():
+ raw = read_lines(MDSTAT)
+
+ if not raw:
+ return None
+
+ return find_mds(raw)
+
+ def get_data(self):
+ """
+ Parse data from _get_raw_data()
+ :return: dict
+ """
+ mds = self.get_mds()
+
+ if not mds:
+ return None
+
+ data = dict()
+ for md in mds:
+ if md.name not in self.mds:
+ self.mds.append(md.name)
+ self.add_new_md_charts(md.name)
+ data.update(md.data())
+ return data
+
+ def check(self):
+ if not self.get_mds():
+            self.error('Failed to read data from {0} or there are no active arrays'.format(MDSTAT))
+ return False
+ return True
+
+ def add_new_md_charts(self, name):
+ order, charts = md_charts(name)
+
+ self.charts['mdstat_health'].add_dimension(['{0}_health'.format(name), name])
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dims = charts[chart_name]['lines']
+
+ chart = self.charts.add_chart(params)
+ for dim in dims:
+ chart.add_dimension(dim)
+
+
+def find_mds(raw_data):
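+    # arrays in /proc/mdstat are separated by blank lines; merge each
+    # array's lines into one string so the regexes above can match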
+ data = defaultdict(str)
+ counter = 1
+
+ for row in (elem.strip() for elem in raw_data):
+ if not row:
+ counter += 1
+ continue
+ data[counter] = ' '.join([data[counter], row])
+
+ mds = list()
+
+ for v in data.values():
+ m = RE_DISKS.search(v)
+
+ if not m:
+ continue
+
+ d = m.groupdict()
+
+ m = RE_STATUS.search(v)
+ if m:
+ d.update(m.groupdict())
+
+ mds.append(MD(d))
+
+ return sorted(mds, key=lambda md: md.name)
+
+
+def read_lines(path):
+ try:
+ with open(path) as f:
+ return f.readlines()
+ except (IOError, OSError):
+ return None
diff --git a/conf.d/python.d/mdstat.conf b/collectors/python.d.plugin/mdstat/mdstat.conf
index 66a2f153c..66a2f153c 100644
--- a/conf.d/python.d/mdstat.conf
+++ b/collectors/python.d.plugin/mdstat/mdstat.conf
diff --git a/collectors/python.d.plugin/megacli/Makefile.inc b/collectors/python.d.plugin/megacli/Makefile.inc
new file mode 100644
index 000000000..83680d723
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += megacli/megacli.chart.py
+dist_pythonconfig_DATA += megacli/megacli.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += megacli/README.md megacli/Makefile.inc
+
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
new file mode 100644
index 000000000..d288a6353
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -0,0 +1,48 @@
+# megacli
+
+This module collects adapter, physical drive, and battery stats.
+
+**Requirements:**
+ * `megacli` program
+ * `sudo` program
+ * the `netdata` user must be able to run `megacli` via `sudo` without a password
+
+To grab stats it executes:
+ * `sudo -n megacli -LDPDInfo -aAll`
+ * `sudo -n megacli -AdpBbuCmd -a0`
+
+
+It produces:
+
+1. **Adapter State**
+
+2. **Physical Drives Media Errors**
+
+3. **Physical Drives Predictive Failures**
+
+4. **Battery Relative State of Charge**
+
+5. **Battery Cycle Count**
+
+### prerequisite
+This module uses `megacli` which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `megacli` as root without a password.
+
+Add to `sudoers`:
+
+ netdata ALL=(root) NOPASSWD: /path/to/megacli
+
+### configuration
+
+**megacli** is disabled by default. It must be explicitly enabled in `python.d.conf`.
+```yaml
+megacli: yes
+```
+
+Battery stats are disabled by default. To enable them, modify `megacli.conf`.
+```yaml
+do_battery: yes
+```
+
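+For reference, the adapter state is reduced to a 0/1 "is degraded" flag;
+a simplified sketch of that mapping (mirroring the `Adapter` class in the
+module code, with a hypothetical parsed state):
+
+```python
+state = 'Optimal'  # hypothetical value parsed from megacli output
+degraded = int(state == 'Degraded')
+print(degraded)  # -> 0
+```
+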
+---
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
new file mode 100644
index 000000000..41a1079f6
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+# Description: megacli netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+
+disabled_by_default = True
+
+update_every = 5
+
+
+def adapter_charts(ads):
+ order = [
+ 'adapter_degraded',
+ ]
+
+ def dims(ad):
+ return [['adapter_{0}_degraded'.format(a.id), 'adapter {0}'.format(a.id)] for a in ad]
+
+ charts = {
+ 'adapter_degraded': {
+ 'options': [None, 'Adapter State', 'is degraded', 'adapter', 'megacli.adapter_degraded', 'line'],
+ 'lines': dims(ads)
+ },
+ }
+
+ return order, charts
+
+
+def pd_charts(pds):
+ order = [
+ 'pd_media_error',
+ 'pd_predictive_failure',
+ ]
+
+ def dims(k, pd):
+ return [['slot_{0}_{1}'.format(p.id, k), 'slot {0}'.format(p.id), 'incremental'] for p in pd]
+
+ charts = {
+ 'pd_media_error': {
+ 'options': [None, 'Physical Drives Media Errors', 'errors/s', 'pd', 'megacli.pd_media_error', 'line'],
+ 'lines': dims('media_error', pds)
+ },
+ 'pd_predictive_failure': {
+ 'options': [None, 'Physical Drives Predictive Failures', 'failures/s', 'pd',
+ 'megacli.pd_predictive_failure', 'line'],
+ 'lines': dims('predictive_failure', pds)
+ }
+ }
+
+ return order, charts
+
+
+def battery_charts(bats):
+ order = list()
+ charts = dict()
+
+ for b in bats:
+ order.append('bbu_{0}_relative_charge'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_relative_charge'.format(b.id): {
+ 'options': [None, 'Relative State of Charge', '%', 'battery',
+ 'megacli.bbu_relative_charge', 'line'],
+ 'lines': [
+ ['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ for b in bats:
+ order.append('bbu_{0}_cycle_count'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_cycle_count'.format(b.id): {
+ 'options': [None, 'Cycle Count', 'cycle count', 'battery', 'megacli.bbu_cycle_count', 'line'],
+ 'lines': [
+ ['bbu_{0}_cycle_count'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ return order, charts
+
+
+RE_ADAPTER = re.compile(
+ r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z]+)'
+)
+
+RE_VD = re.compile(
+ r'Slot Number: ([0-9]+) Media Error Count: ([0-9]+) Predictive Failure Count: ([0-9]+)'
+)
+
+RE_BATTERY = re.compile(
+ r'BBU Capacity Info for Adapter: ([0-9]+) Relative State of Charge: ([0-9]+) % Cycle Count: ([0-9]+)'
+)
+
+
+def find_adapters(d):
+ keys = ('Adapter #', 'State')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [Adapter(*v) for v in RE_ADAPTER.findall(d)]
+
+
+def find_pds(d):
+ keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [PD(*v) for v in RE_VD.findall(d)]
+
+
+def find_batteries(d):
+ keys = ('BBU Capacity Info for Adapter', 'Relative State of Charge', 'Cycle Count')
+ d = ' '.join(v.strip() for v in d if v.strip().startswith(keys))
+ return [Battery(*v) for v in RE_BATTERY.findall(d)]
+
+
+class Adapter:
+ def __init__(self, n, state):
+ self.id = n
+ self.state = int(state == 'Degraded')
+
+ def data(self):
+ return {
+ 'adapter_{0}_degraded'.format(self.id): self.state,
+ }
+
+
+class PD:
+ def __init__(self, n, media_err, predict_fail):
+ self.id = n
+ self.media_err = media_err
+ self.predict_fail = predict_fail
+
+ def data(self):
+ return {
+ 'slot_{0}_media_error'.format(self.id): self.media_err,
+ 'slot_{0}_predictive_failure'.format(self.id): self.predict_fail,
+ }
+
+
+class Battery:
+ def __init__(self, adapt_id, rel_charge, cycle_count):
+ self.id = adapt_id
+ self.rel_charge = rel_charge
+ self.cycle_count = cycle_count
+
+ def data(self):
+ return {
+ 'bbu_{0}_relative_charge'.format(self.id): self.rel_charge,
+ 'bbu_{0}_cycle_count'.format(self.id): self.cycle_count,
+ }
+
+
+# TODO: hardcoded sudo...
+class Megacli:
+ def __init__(self):
+ self.s = find_binary('sudo')
+ self.m = find_binary('megacli')
+ self.sudo_check = [self.s, '-n', '-v']
+ self.disk_info = [self.s, '-n', self.m, '-LDPDInfo', '-aAll', '-NoLog']
+ self.battery_info = [self.s, '-n', self.m, '-AdpBbuCmd', '-a0', '-NoLog']
+
+ def __bool__(self):
+ return bool(self.s and self.m)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.megacli = Megacli()
+ self.do_battery = self.configuration.get('do_battery')
+
+ def check_sudo(self):
+ err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+ return True
+
+ def check_disk_info(self):
+ d = self._get_raw_data(command=self.megacli.disk_info)
+ if not d:
+ return False
+
+ ads = find_adapters(d)
+ pds = find_pds(d)
+
+ if not (ads and pds):
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.disk_info)))
+ return False
+
+ o, c = adapter_charts(ads)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ o, c = pd_charts(pds)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def check_battery(self):
+ d = self._get_raw_data(command=self.megacli.battery_info)
+ if not d:
+ return False
+
+ bats = find_batteries(d)
+
+ if not bats:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.battery_info)))
+ return False
+
+ o, c = battery_charts(bats)
+ self.order.extend(o)
+ self.definitions.update(c)
+ return True
+
+ def check(self):
+ if not self.megacli:
+ self.error('can\'t locate "sudo" or "megacli" binary')
+ return None
+
+ if not (self.check_sudo() and self.check_disk_info()):
+ return False
+
+ if self.do_battery:
+ self.do_battery = self.check_battery()
+
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ data.update(self.get_adapter_pd_data())
+
+ if self.do_battery:
+ data.update(self.get_battery_data())
+
+ return data or None
+
+ def get_adapter_pd_data(self):
+ raw = self._get_raw_data(command=self.megacli.disk_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for a in find_adapters(raw):
+ data.update(a.data())
+
+ for p in find_pds(raw):
+ data.update(p.data())
+
+ return data
+
+ def get_battery_data(self):
+ raw = self._get_raw_data(command=self.megacli.battery_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for b in find_batteries(raw):
+ data.update(b.data())
+
+ return data
diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf
new file mode 100644
index 000000000..73afb2f7f
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.conf
@@ -0,0 +1,62 @@
+# netdata python.d.plugin configuration for megacli
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, megacli also supports the following:
+#
+# do_battery: yes/no # default is no. Collect battery stats (adds an additional `megacli -AdpBbuCmd -a0` call).
+#
+# ----------------------------------------------------------------------
+# uncomment the line below to collect battery statistics
+# do_battery: yes
diff --git a/collectors/python.d.plugin/memcached/Makefile.inc b/collectors/python.d.plugin/memcached/Makefile.inc
new file mode 100644
index 000000000..e60357161
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += memcached/memcached.chart.py
+dist_pythonconfig_DATA += memcached/memcached.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += memcached/README.md memcached/Makefile.inc
+
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
new file mode 100644
index 000000000..3521c109d
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -0,0 +1,69 @@
+# memcached
+
+Memcached monitoring module. Data is grabbed from the [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
+
+1. **Network** in kilobytes/s
+ * read
+ * written
+
+2. **Connections** per second
+ * current
+ * rejected
+ * total
+
+3. **Items** in cluster
+ * current
+ * total
+
+4. **Evicted and Reclaimed** items
+ * evicted
+ * reclaimed
+
+5. **GET** requests/s
+ * hits
+ * misses
+
+6. **GET rate** in requests/s
+ * rate
+
+7. **SET rate** in requests/s
+ * rate
+
+8. **DELETE** requests/s
+ * hits
+ * misses
+
+9. **CAS** requests/s
+ * hits
+ * misses
+ * bad value
+
+10. **Increment** requests/s
+ * hits
+ * misses
+
+11. **Decrement** requests/s
+ * hits
+ * misses
+
+12. **Touch** requests/s
+ * hits
+ * misses
+
+13. **Touch rate** in requests/s
+ * rate
+
+### configuration
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+```
+
+If no configuration is given, the module will attempt to connect to a memcached instance at `127.0.0.1:11211`.
+
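+For reference, replies on the stats interface are plain-text lines of the
+form `STAT <name> <value>`, terminated by `END`. The module's parsing
+boils down to (simplified sketch, hypothetical reply line):
+
+```python
+line = 'STAT curr_connections 10'  # hypothetical stats reply line
+if line.startswith('STAT'):
+    key, value = line[5:].split(' ')[:2]
+    print(key, value)  # -> curr_connections 10
+```
+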
+---
diff --git a/python.d/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
index 4f7adfa23..3c310ec69 100644
--- a/python.d/memcached.chart.py
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -1,11 +1,12 @@
# -*- coding: utf-8 -*-
# Description: memcached netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.SocketService import SocketService
# default module values (can be overridden per job in `config`)
-#update_every = 2
+# update_every = 2
priority = 60000
retries = 60
@@ -28,92 +29,106 @@ CHARTS = {
'lines': [
['avail', 'available', 'absolute', 1, 1048576],
['used', 'used', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'net': {
'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
'lines': [
['bytes_read', 'in', 'incremental', 8, 1024],
['bytes_written', 'out', 'incremental', -8, 1024]
- ]},
+ ]
+ },
'connections': {
'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
'lines': [
['curr_connections', 'current', 'incremental'],
['rejected_connections', 'rejected', 'incremental'],
['total_connections', 'total', 'incremental']
- ]},
+ ]
+ },
'items': {
'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
'lines': [
['curr_items', 'current', 'absolute'],
['total_items', 'total', 'absolute']
- ]},
+ ]
+ },
'evicted_reclaimed': {
'options': [None, 'Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
'lines': [
['reclaimed', 'reclaimed', 'absolute'],
['evictions', 'evicted', 'absolute']
- ]},
+ ]
+ },
'get': {
'options': [None, 'Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
'lines': [
['get_hits', 'hits', 'percent-of-absolute-row'],
['get_misses', 'misses', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'get_rate': {
'options': [None, 'Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
'lines': [
['cmd_get', 'rate', 'incremental']
- ]},
+ ]
+ },
'set_rate': {
'options': [None, 'Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
'lines': [
['cmd_set', 'rate', 'incremental']
- ]},
+ ]
+ },
'delete': {
'options': [None, 'Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
'lines': [
['delete_hits', 'hits', 'percent-of-absolute-row'],
['delete_misses', 'misses', 'percent-of-absolute-row'],
- ]},
+ ]
+ },
'cas': {
'options': [None, 'Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
'lines': [
['cas_hits', 'hits', 'percent-of-absolute-row'],
['cas_misses', 'misses', 'percent-of-absolute-row'],
['cas_badval', 'bad value', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'increment': {
'options': [None, 'Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
'lines': [
['incr_hits', 'hits', 'percent-of-absolute-row'],
['incr_misses', 'misses', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'decrement': {
'options': [None, 'Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
'lines': [
['decr_hits', 'hits', 'percent-of-absolute-row'],
['decr_misses', 'misses', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'touch': {
'options': [None, 'Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
'lines': [
['touch_hits', 'hits', 'percent-of-absolute-row'],
['touch_misses', 'misses', 'percent-of-absolute-row']
- ]},
+ ]
+ },
'touch_rate': {
'options': [None, 'Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
'lines': [
['cmd_touch', 'rate', 'incremental']
- ]}
+ ]
+ }
}
class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
- self.request = "stats\r\n"
- self.host = "localhost"
+ self.request = 'stats\r\n'
+ self.host = 'localhost'
self.port = 11211
self._keep_alive = True
self.unix_socket = None
@@ -131,13 +146,13 @@ class Service(SocketService):
return None
if response.startswith('ERROR'):
- self.error("received ERROR")
+ self.error('received ERROR')
return None
try:
- parsed = response.split("\n")
+ parsed = response.split('\n')
except AttributeError:
- self.error("response is invalid/empty")
+ self.error('response is invalid/empty')
return None
# split the response
@@ -148,7 +163,7 @@ class Service(SocketService):
t = line[5:].split(' ')
data[t[0]] = t[1]
except (IndexError, ValueError):
- self.debug("invalid line received: " + str(line))
+ self.debug('invalid line received: ' + str(line))
if not data:
self.error("received data doesn't have any records")
@@ -165,10 +180,10 @@ class Service(SocketService):
def _check_raw_data(self, data):
if data.endswith('END\r\n'):
- self.debug("received full response from memcached")
+ self.debug('received full response from memcached')
return True
- self.debug("waiting more data from memcached")
+ self.debug('waiting for more data from memcached')
return False
def check(self):
diff --git a/conf.d/python.d/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf
index 85c3daf65..85c3daf65 100644
--- a/conf.d/python.d/memcached.conf
+++ b/collectors/python.d.plugin/memcached/memcached.conf
diff --git a/collectors/python.d.plugin/mongodb/Makefile.inc b/collectors/python.d.plugin/mongodb/Makefile.inc
new file mode 100644
index 000000000..784945aa6
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mongodb/mongodb.chart.py
+dist_pythonconfig_DATA += mongodb/mongodb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mongodb/README.md mongodb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
new file mode 100644
index 000000000..8e5f652c5
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -0,0 +1,141 @@
+# mongodb
+
+Module monitors MongoDB performance and health metrics.
+
+**Requirements:**
+ * `python-pymongo` package v2.4+.
+
+You need to install it manually.
+
+
+The number of charts depends on the MongoDB version, storage engine and other enabled features (e.g. replication):
+
+1. **Read requests**:
+ * query
+ * getmore (operations a cursor executes to fetch additional data from a query)
+
+2. **Write requests**:
+ * insert
+ * delete
+ * update
+
+3. **Active clients**:
+ * readers (number of clients with read operations in progress or queued)
+ * writers (number of clients with write operations in progress or queued)
+
+4. **Journal transactions**:
+ * commits (count of transactions that have been written to the journal)
+
+5. **Data written to the journal**:
+ * volume (volume of data)
+
+6. **Background flush** (MMAPv1):
+ * average ms (average time taken by flushes to execute)
+ * last ms (time taken by the last flush)
+
+7. **Read tickets** (WiredTiger):
+ * in use (number of read tickets in use)
+ * available (number of available read tickets remaining)
+
+8. **Write tickets** (WiredTiger):
+ * in use (number of write tickets in use)
+ * available (number of available write tickets remaining)
+
+9. **Cursors**:
+ * opened (number of cursors currently opened by MongoDB for clients)
+ * timedOut (number of cursors that have timed out)
+ * noTimeout (number of open cursors with timeout disabled)
+
+10. **Connections**:
+ * connected (number of clients currently connected to the database server)
+ * unused (number of unused connections available for new clients)
+
+11. **Memory usage metrics**:
+ * virtual
+ * resident (amount of memory used by the database process)
+ * mapped
+ * non mapped
+
+12. **Page faults**:
+ * page faults (number of times MongoDB had to read data from disk)
+
+13. **Cache metrics** (WiredTiger):
+ * percentage of bytes currently in the cache (amount of space taken by cached data)
+ * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+
+14. **Pages evicted from cache** (WiredTiger):
+ * modified
+ * unmodified
+
+15. **Queued requests**:
+ * readers (number of read requests currently queued)
+ * writers (number of write requests currently queued)
+
+16. **Errors**:
+ * msg (number of message assertions raised)
+ * warning (number of warning assertions raised)
+ * regular (number of regular assertions raised)
+ * user (number of assertions corresponding to errors generated by users)
+
+17. **Storage metrics** (one chart for every database)
+ * dataSize (size of all documents + padding in the database)
+ * indexSize (size of all indexes in the database)
+ * storageSize (size of all extents in the database)
+
+18. **Documents in the database** (one chart for all databases)
+ * documents (number of objects in the database among all the collections)
+
+19. **tcmalloc metrics**
+ * central cache free
+ * current total thread cache
+ * pageheap free
+ * pageheap unmapped
+ * thread cache free
+ * transfer cache free
+ * heap size
+
+20. **Commands total/failed rate**
+ * count
+ * createIndex
+ * delete
+ * eval
+ * findAndModify
+ * insert
+
+21. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
+ * Global lock
+ * Database lock
+ * Collection lock
+ * Metadata lock
+ * oplog lock
+
+22. **Replica set members state**
+ * state
+
+23. **Oplog window**
+ * window (interval of time between the oldest and the latest entries in the oplog)
+
+24. **Replication lag**
+ * member (time when the last entry from the oplog was applied, for every member)
+
+25. **Replica set member heartbeat latency**
+ * member (time when the last heartbeat was received from each replica set member)
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 27017
+ user : 'netdata'
+ pass : 'netdata'
+
+```
+
+If no configuration is given, the module will attempt to connect to the mongodb daemon at `127.0.0.1:27017`.
+
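+For reference, here is a minimal sketch of what the module does under the hood: connect with
+pymongo and read the output of the `serverStatus` command. The snippet is illustrative only,
+using the default connection parameters shown above; it is not part of the module:
+
+```python
+from pymongo import MongoClient
+
+client = MongoClient(host='127.0.0.1', port=27017)
+# charts are built from the output of the serverStatus admin command
+status = client.admin.command('serverStatus')
+print(status['opcounters'])  # insert/query/update/delete/getmore counters
+```
+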
+---
diff --git a/python.d/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
index 909a419da..10344342d 100644
--- a/python.d/mongodb.chart.py
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: mongodb netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from copy import deepcopy
from datetime import datetime
@@ -31,7 +32,8 @@ REPL_SET_STATES = [
('6', 'unknown'),
('9', 'rollback'),
('10', 'removed'),
- ('0', 'startup')]
+ ('0', 'startup')
+]
def multiply_by_100(value):
@@ -141,12 +143,37 @@ DBSTATS = [
]
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['read_operations', 'write_operations', 'active_clients', 'journaling_transactions',
- 'journaling_volume', 'background_flush_average', 'background_flush_last', 'background_flush_rate',
- 'wiredtiger_read', 'wiredtiger_write', 'cursors', 'connections', 'memory', 'page_faults',
- 'queued_requests', 'record_moves', 'wiredtiger_cache', 'wiredtiger_pages_evicted', 'asserts',
- 'locks_collection', 'locks_database', 'locks_global', 'locks_metadata', 'locks_oplog',
- 'dbstats_objects', 'tcmalloc_generic', 'tcmalloc_metrics', 'command_total_rate', 'command_failed_rate']
+ORDER = [
+ 'read_operations',
+ 'write_operations',
+ 'active_clients',
+ 'journaling_transactions',
+ 'journaling_volume',
+ 'background_flush_average',
+ 'background_flush_last',
+ 'background_flush_rate',
+ 'wiredtiger_read',
+ 'wiredtiger_write',
+ 'cursors',
+ 'connections',
+ 'memory',
+ 'page_faults',
+ 'queued_requests',
+ 'record_moves',
+ 'wiredtiger_cache',
+ 'wiredtiger_pages_evicted',
+ 'asserts',
+ 'locks_collection',
+ 'locks_database',
+ 'locks_global',
+ 'locks_metadata',
+ 'locks_oplog',
+ 'dbstats_objects',
+ 'tcmalloc_generic',
+ 'tcmalloc_metrics',
+ 'command_total_rate',
+ 'command_failed_rate'
+]
CHARTS = {
'read_operations': {
@@ -155,7 +182,8 @@ CHARTS = {
'lines': [
['query', None, 'incremental'],
['getmore', None, 'incremental']
- ]},
+ ]
+ },
'write_operations': {
'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
'mongodb.write_operations', 'line'],
@@ -163,57 +191,66 @@ CHARTS = {
['insert', None, 'incremental'],
['update', None, 'incremental'],
['delete', None, 'incremental']
- ]},
+ ]
+ },
'active_clients': {
'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
'throughput metrics', 'mongodb.active_clients', 'line'],
'lines': [
['activeClients_readers', 'readers', 'absolute'],
['activeClients_writers', 'writers', 'absolute']
- ]},
+ ]
+ },
'journaling_transactions': {
'options': [None, 'Transactions that have been written to the journal', 'commits',
'database performance', 'mongodb.journaling_transactions', 'line'],
'lines': [
['commits', None, 'absolute']
- ]},
+ ]
+ },
'journaling_volume': {
'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
'mongodb.journaling_volume', 'line'],
'lines': [
['journaledMB', 'volume', 'absolute', 1, 100]
- ]},
+ ]
+ },
'background_flush_average': {
'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
'mongodb.background_flush_average', 'line'],
'lines': [
['average_ms', 'time', 'absolute', 1, 100]
- ]},
+ ]
+ },
'background_flush_last': {
'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
'mongodb.background_flush_last', 'line'],
'lines': [
['last_ms', 'time', 'absolute', 1, 100]
- ]},
+ ]
+ },
'background_flush_rate': {
'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
'lines': [
['flushes', 'flushes', 'incremental', 1, 1]
- ]},
+ ]
+ },
'wiredtiger_read': {
'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
'mongodb.wiredtiger_read', 'stacked'],
'lines': [
['wiredTigerRead_available', 'available', 'absolute', 1, 1],
['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
- ]},
+ ]
+ },
'wiredtiger_write': {
'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
'mongodb.wiredtiger_write', 'stacked'],
'lines': [
['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
- ]},
+ ]
+ },
'cursors': {
'options': [None, 'Currently opened cursors, cursors with timeout disabled and timed out cursors',
'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
@@ -221,14 +258,16 @@ CHARTS = {
['cursor_total', 'opened', 'absolute', 1, 1],
['noTimeout', None, 'absolute', 1, 1],
['timedOut', None, 'incremental', 1, 1]
- ]},
+ ]
+ },
'connections': {
'options': [None, 'Currently connected clients and unused connections', 'connections',
'resource utilization', 'mongodb.connections', 'stacked'],
'lines': [
['connections_available', 'unused', 'absolute', 1, 1],
['connections_current', 'connected', 'absolute', 1, 1]
- ]},
+ ]
+ },
'memory': {
'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
'lines': [
@@ -236,60 +275,70 @@ CHARTS = {
['resident', None, 'absolute', 1, 1],
['nonmapped', None, 'absolute', 1, 1],
['mapped', None, 'absolute', 1, 1]
- ]},
+ ]
+ },
'page_faults': {
'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s',
'resource utilization', 'mongodb.page_faults', 'line'],
'lines': [
['page_faults', None, 'incremental', 1, 1]
- ]},
+ ]
+ },
'queued_requests': {
- 'options': [None, 'Currently queued read and wrire requests', 'requests', 'resource saturation',
+ 'options': [None, 'Currently queued read and write requests', 'requests', 'resource saturation',
'mongodb.queued_requests', 'line'],
'lines': [
['currentQueue_readers', 'readers', 'absolute', 1, 1],
['currentQueue_writers', 'writers', 'absolute', 1, 1]
- ]},
+ ]
+ },
'record_moves': {
'options': [None, 'Number of times documents had to be moved on-disk', 'number',
'resource saturation', 'mongodb.record_moves', 'line'],
'lines': [
['moves', None, 'incremental', 1, 1]
- ]},
+ ]
+ },
'asserts': {
- 'options': [None, 'Number of message, warning, regular, corresponding to errors generated'
- ' by users assertions raised', 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
+ 'options': [
+ None,
+ 'Number of message, warning, regular and user (errors generated by users) assertions raised',
+ 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
'lines': [
['msg', None, 'incremental', 1, 1],
['warning', None, 'incremental', 1, 1],
['regular', None, 'incremental', 1, 1],
['user', None, 'incremental', 1, 1]
- ]},
+ ]
+ },
'wiredtiger_cache': {
'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes',
'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
'lines': [
['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'wiredtiger_pages_evicted': {
'options': [None, 'Pages evicted from the cache',
'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
'lines': [
['unmodified', None, 'absolute', 1, 1],
['modified', None, 'absolute', 1, 1]
- ]},
+ ]
+ },
'dbstats_objects': {
'options': [None, 'Number of documents in the database among all the collections', 'documents',
'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'tcmalloc_generic': {
'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
'lines': [
['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
['heap_size', 'heap_size', 'absolute', 1, 1048576]
- ]},
+ ]
+ },
'tcmalloc_metrics': {
'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
'lines': [
@@ -299,7 +348,8 @@ CHARTS = {
['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
- ]},
+ ]
+ },
'command_total_rate': {
'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
'lines': [
@@ -310,7 +360,8 @@ CHARTS = {
['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
['insert_total', 'insert', 'incremental', 1, 1],
['update_total', 'update', 'incremental', 1, 1]
- ]},
+ ]
+ },
'command_failed_rate': {
'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
'lines': [
@@ -321,7 +372,8 @@ CHARTS = {
['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
['insert_failed', 'insert', 'incremental', 1, 1],
['update_failed', 'update', 'incremental', 1, 1]
- ]},
+ ]
+ },
'locks_collection': {
'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
@@ -330,7 +382,8 @@ CHARTS = {
['Collection_W', 'exclusive', 'incremental'],
['Collection_r', 'intent_shared', 'incremental'],
['Collection_w', 'intent_exclusive', 'incremental']
- ]},
+ ]
+ },
'locks_database': {
'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
@@ -339,7 +392,8 @@ CHARTS = {
['Database_W', 'exclusive', 'incremental'],
['Database_r', 'intent_shared', 'incremental'],
['Database_w', 'intent_exclusive', 'incremental']
- ]},
+ ]
+ },
'locks_global': {
'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
@@ -348,21 +402,24 @@ CHARTS = {
['Global_W', 'exclusive', 'incremental'],
['Global_r', 'intent_shared', 'incremental'],
['Global_w', 'intent_exclusive', 'incremental']
- ]},
+ ]
+ },
'locks_metadata': {
'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
'lines': [
['Metadata_R', 'shared', 'incremental'],
['Metadata_w', 'intent_exclusive', 'incremental']
- ]},
+ ]
+ },
'locks_oplog': {
'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
'lines': [
['oplog_r', 'intent_shared', 'incremental'],
['oplog_w', 'intent_exclusive', 'incremental']
- ]}
+ ]
+ }
}
@@ -383,7 +440,7 @@ class Service(SimpleService):
def check(self):
if not PYMONGO:
- self.error('Pymongo module is needed to use mongodb.chart.py')
+ self.error('Pymongo package v2.4+ is needed to use mongodb.chart.py')
return False
self.connection, server_status, error = self._create_connection()
if error:
@@ -491,9 +548,10 @@ class Service(SimpleService):
# Create "heartbeat delay" chart
self.order.append('heartbeat_delay')
self.definitions['heartbeat_delay'] = {
- 'options': [None, 'Time when last heartbeat was received'
- ' from the replica set member (lastHeartbeatRecv)',
- 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
+ 'options': [
+ None,
+ 'Time when last heartbeat was received from the replica set member (lastHeartbeatRecv)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
'lines': create_lines(other_hosts, 'heartbeat_lag')}
# Create "optimedate delay" chart
self.order.append('optimedate_delay')
@@ -561,9 +619,9 @@ class Service(SimpleService):
raw_data['getReplicationInfo'] = dict()
try:
raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
- "$natural", ASCENDING).limit(1)[0]
+ '$natural', ASCENDING).limit(1)[0]
raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
- "$natural", DESCENDING).limit(1)[0]
+ '$natural', DESCENDING).limit(1)[0]
return raw_data
except PyMongoError:
return None
diff --git a/conf.d/python.d/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf
index 62faef68d..62faef68d 100644
--- a/conf.d/python.d/mongodb.conf
+++ b/collectors/python.d.plugin/mongodb/mongodb.conf
diff --git a/collectors/python.d.plugin/monit/Makefile.inc b/collectors/python.d.plugin/monit/Makefile.inc
new file mode 100644
index 000000000..4a3673fd5
--- /dev/null
+++ b/collectors/python.d.plugin/monit/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += monit/monit.chart.py
+dist_pythonconfig_DATA += monit/monit.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += monit/README.md monit/Makefile.inc
+
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
new file mode 100644
index 000000000..6d10240c9
--- /dev/null
+++ b/collectors/python.d.plugin/monit/README.md
@@ -0,0 +1,33 @@
+# monit
+
+Monit monitoring module. Data is grabbed from the stats XML interface (it has existed for a long time, but is not mentioned in the official documentation). Mostly this plugin shows statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
+
+1. **Filesystems**
+ * Filesystems
+ * Directories
+ * Files
+ * Pipes
+
+2. **Applications**
+ * Processes (+threads/children)
+ * Programs
+
+3. **Network**
+ * Hosts (+latency)
+ * Network interfaces
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://localhost:2812'
+ user : 'admin'
+ pass : 'monit'
+```
+
+If no configuration is given, the module will attempt to connect to monit at `http://localhost:2812`.
+
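+As a rough sketch, this is the XML endpoint the module polls (the URL format matches what
+monit.chart.py requests; the use of `requests` here is illustrative, the module itself relies
+on netdata's UrlService):
+
+```python
+import xml.etree.ElementTree as ET
+
+import requests  # illustrative only; not used by the module
+
+url = 'http://localhost:2812/_status?format=xml&level=full'
+root = ET.fromstring(requests.get(url, auth=('admin', 'monit')).text)
+# type='3' selects process checks (see MONIT_SERVICE_NAMES in monit.chart.py)
+for service in root.findall("./service[@type='3']"):
+    print(service.find('name').text, service.find('status').text)
+```
+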
+---
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
new file mode 100644
index 000000000..51943c0e1
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+# Description: monit netdata python.d module
+# Author: Evgeniy K. (n0guest)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import xml.etree.ElementTree as ET
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# see enum State_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
+MONIT_SERVICE_NAMES = ['Filesystem', 'Directory', 'File', 'Process', 'Host', 'System', 'Fifo', 'Program', 'Net']
+DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8]
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'filesystem',
+ 'directory',
+ 'file',
+ 'process',
+ 'process_uptime',
+ 'process_threads',
+ 'process_children',
+ 'host',
+ 'host_latency',
+ 'system',
+ 'fifo',
+ 'program',
+ 'net'
+]
+CHARTS = {
+ 'filesystem': {
+ 'options': ['filesystems', 'Filesystems', 'filesystems', 'filesystem', 'monit.filesystems', 'line'],
+ 'lines': []
+ },
+ 'directory': {
+ 'options': ['directories', 'Directories', 'directories', 'filesystem', 'monit.directories', 'line'],
+ 'lines': []
+ },
+ 'file': {
+ 'options': ['files', 'Files', 'files', 'filesystem', 'monit.files', 'line'],
+ 'lines': []
+ },
+ 'fifo': {
+ 'options': ['fifos', 'Pipes (fifo)', 'pipes', 'filesystem', 'monit.fifos', 'line'],
+ 'lines': []
+ },
+ 'program': {
+ 'options': ['programs', 'Programs statuses', 'programs', 'applications', 'monit.programs', 'line'],
+ 'lines': []
+ },
+ 'process': {
+ 'options': ['processes', 'Processes statuses', 'processes', 'applications', 'monit.services', 'line'],
+ 'lines': []
+ },
+ 'process_uptime': {
+ 'options': ['processes uptime', 'Processes uptime', 'seconds', 'applications',
+ 'monit.process_uptime', 'line', 'hidden'],
+ 'lines': []
+ },
+ 'process_threads': {
+ 'options': ['processes threads', 'Processes threads', 'threads', 'applications',
+ 'monit.process_threads', 'line'],
+ 'lines': []
+ },
+ 'process_children': {
+ 'options': ['processes children', 'Child processes', 'children', 'applications',
+ 'monit.process_childrens', 'line'],
+ 'lines': []
+ },
+ 'host': {
+ 'options': ['hosts', 'Hosts', 'hosts', 'network', 'monit.hosts', 'line'],
+ 'lines': []
+ },
+ 'host_latency': {
+ 'options': ['hosts latency', 'Hosts latency', 'milliseconds/s', 'network', 'monit.host_latency', 'line'],
+ 'lines': []
+ },
+ 'net': {
+ 'options': ['interfaces', 'Network interfaces and addresses', 'interfaces', 'network',
+ 'monit.networks', 'line'],
+ 'lines': []
+ },
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ base_url = self.configuration.get('url', 'http://localhost:2812')
+ self.url = '{0}/_status?format=xml&level=full'.format(base_url)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def parse(self, data):
+ try:
+ xml = ET.fromstring(data)
+ except ET.ParseError:
+ self.error("URL {0} didn't return a vaild XML page. Please check your settings.".format(self.url))
+ return None
+ return xml
+
+ def check(self):
+ self._manager = self._build_manager()
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ return bool(self.parse(raw_data))
+
+ def _get_data(self):
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ xml = self.parse(raw_data)
+ if not xml:
+ return None
+
+ data = {}
+ for service_id in DEFAULT_SERVICES_IDS:
+ service_category = MONIT_SERVICE_NAMES[service_id].lower()
+ if service_category == 'system':
+ self.debug("Skipping service from 'System' category, because it's useless in graphs")
+ continue
+
+ xpath_query = "./service[@type='{0}']".format(service_id)
+ self.debug('Searching for {0} as {1}'.format(service_category, xpath_query))
+ for service_node in xml.findall(xpath_query):
+
+ service_name = service_node.find('name').text
+ service_status = service_node.find('status').text
+ service_monitoring = service_node.find('monitor').text
+ self.debug('=> found {0} with type={1}, status={2}, monitoring={3}'.format(service_name,
+ service_id, service_status, service_monitoring))
+
+ dimension_key = service_category + '_' + service_name
+ if dimension_key not in self.charts[service_category]:
+ self.charts[service_category].add_dimension([dimension_key, service_name, 'absolute'])
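+ # a service counts as healthy only when monit reports status '0' (check passed) and monitor '1' (actively monitored)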
+ data[dimension_key] = 1 if service_status == '0' and service_monitoring == '1' else 0
+
+ if service_category == 'process':
+ for subnode in ('uptime', 'threads', 'children'):
+ subnode_value = service_node.find(subnode)
+ if subnode_value is None:
+ continue
+ if subnode == 'uptime' and int(subnode_value.text) < 0:
+ self.debug('Skipping buggy metric with negative uptime (monit before v5.16)')
+ continue
+ dimension_key = 'process_{0}_{1}'.format(subnode, service_name)
+ if dimension_key not in self.charts['process_' + subnode]:
+ self.charts['process_' + subnode].add_dimension([dimension_key, service_name, 'absolute'])
+ data[dimension_key] = int(subnode_value.text)
+
+ if service_category == 'host':
+ subnode_value = service_node.find('./icmp/responsetime')
+ if subnode_value is None:
+ continue
+ dimension_key = 'host_latency_{0}'.format(service_name)
+ if dimension_key not in self.charts['host_latency']:
+ self.charts['host_latency'].add_dimension([dimension_key, service_name,
+ 'absolute', 1000, 1000000])
+ data[dimension_key] = float(subnode_value.text) * 1000000
+
+ return data or None
diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf
new file mode 100644
index 000000000..f9c26dbc3
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.conf
@@ -0,0 +1,88 @@
+# netdata python.d.plugin configuration for monit
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, this plugin also supports the following:
+#
+# url: 'URL' # the URL to fetch monit's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# Example
+#
+# local:
+# name : 'Local Monit'
+# url : 'http://localhost:2812'
+#
+# "local" will show up in Netdata logs. "Reverse Proxy" will show up in the menu
+# in the monit section.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:2812'
diff --git a/collectors/python.d.plugin/mysql/Makefile.inc b/collectors/python.d.plugin/mysql/Makefile.inc
new file mode 100644
index 000000000..03e8b65eb
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mysql/mysql.chart.py
+dist_pythonconfig_DATA += mysql/mysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mysql/README.md mysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
new file mode 100644
index 000000000..e38098e7e
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -0,0 +1,90 @@
+# mysql
+
+Module monitors one or more MySQL servers.
+
+**Requirements:**
+ * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
+
+It will produce the following charts (if data is available):
+
+1. **Bandwidth** in kbps
+ * in
+ * out
+
+2. **Queries** in queries/sec
+ * queries
+ * questions
+ * slow queries
+
+3. **Operations** in operations/sec
+ * opened tables
+ * flush
+ * commit
+ * delete
+ * prepare
+ * read first
+ * read key
+ * read next
+ * read prev
+ * read random
+ * read random next
+ * rollback
+ * save point
+ * update
+ * write
+
+4. **Table Locks** in locks/sec
+ * immediate
+ * waited
+
+5. **Select Issues** in issues/sec
+ * full join
+ * full range join
+ * range
+ * range check
+ * scan
+
+6. **Sort Issues** in issues/sec
+ * merge passes
+ * range
+ * scan
+
+### configuration
+
+You can provide, per server, the following:
+
+1. a username which has access to the database (defaults to 'root')
+2. password (defaults to none)
+3. mysql my.cnf configuration file
+4. mysql socket (optional)
+5. mysql host (ip or hostname)
+6. mysql port (defaults to 3306)
+
+Here is an example for 3 servers:
+
+```yaml
+update_every : 10
+priority : 90100
+retries : 5
+
+local:
+ 'my.cnf' : '/etc/mysql/my.cnf'
+ priority : 90000
+
+local_2:
+ user : 'root'
+ pass : 'blablablabla'
+ socket : '/var/run/mysqld/mysqld.sock'
+ update_every : 1
+
+remote:
+ user : 'admin'
+ pass : 'bla'
+ host : 'example.org'
+ port : 9000
+ retries : 20
+```
+
+If no configuration is given, the module will attempt to connect to the MySQL server via the unix socket at `/var/run/mysqld/mysqld.sock`, without a password and with the username `root`.
+
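+As a rough sketch, this is essentially what the module runs on every update ('SHOW GLOBAL
+STATUS;' is taken from the module source; the PyMySQL connection details below are
+illustrative):
+
+```python
+import pymysql
+
+conn = pymysql.connect(unix_socket='/var/run/mysqld/mysqld.sock', user='root')
+with conn.cursor() as cursor:
+    cursor.execute('SHOW GLOBAL STATUS;')
+    stats = dict(cursor.fetchall())  # rows are (Variable_name, Value) pairs
+print(stats['Queries'], stats['Slow_queries'])
+```
+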
+---
diff --git a/python.d/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
index 4c7058b26..c4d1e8b3a 100644
--- a/python.d/mysql.chart.py
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description: MySQL netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.MySQLService import MySQLService
@@ -12,118 +14,127 @@ retries = 60
# query executed on MySQL server
QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
QUERY_SLAVE = 'SHOW SLAVE STATUS;'
+QUERY_VARIABLES = 'SHOW GLOBAL VARIABLES LIKE \'max_connections\';'
GLOBAL_STATS = [
- 'Bytes_received',
- 'Bytes_sent',
- 'Queries',
- 'Questions',
- 'Slow_queries',
- 'Handler_commit',
- 'Handler_delete',
- 'Handler_prepare',
- 'Handler_read_first',
- 'Handler_read_key',
- 'Handler_read_next',
- 'Handler_read_prev',
- 'Handler_read_rnd',
- 'Handler_read_rnd_next',
- 'Handler_rollback',
- 'Handler_savepoint',
- 'Handler_savepoint_rollback',
- 'Handler_update',
- 'Handler_write',
- 'Table_locks_immediate',
- 'Table_locks_waited',
- 'Select_full_join',
- 'Select_full_range_join',
- 'Select_range',
- 'Select_range_check',
- 'Select_scan',
- 'Sort_merge_passes',
- 'Sort_range',
- 'Sort_scan',
- 'Created_tmp_disk_tables',
- 'Created_tmp_files',
- 'Created_tmp_tables',
- 'Connections',
- 'Aborted_connects',
- 'Binlog_cache_disk_use',
- 'Binlog_cache_use',
- 'Threads_connected',
- 'Threads_created',
- 'Threads_cached',
- 'Threads_running',
- 'Thread_cache_misses',
- 'Innodb_data_read',
- 'Innodb_data_written',
- 'Innodb_data_reads',
- 'Innodb_data_writes',
- 'Innodb_data_fsyncs',
- 'Innodb_data_pending_reads',
- 'Innodb_data_pending_writes',
- 'Innodb_data_pending_fsyncs',
- 'Innodb_log_waits',
- 'Innodb_log_write_requests',
- 'Innodb_log_writes',
- 'Innodb_os_log_fsyncs',
- 'Innodb_os_log_pending_fsyncs',
- 'Innodb_os_log_pending_writes',
- 'Innodb_os_log_written',
- 'Innodb_row_lock_current_waits',
- 'Innodb_rows_inserted',
- 'Innodb_rows_read',
- 'Innodb_rows_updated',
- 'Innodb_rows_deleted',
- 'Innodb_buffer_pool_pages_data',
- 'Innodb_buffer_pool_pages_dirty',
- 'Innodb_buffer_pool_pages_free',
- 'Innodb_buffer_pool_pages_flushed',
- 'Innodb_buffer_pool_pages_misc',
- 'Innodb_buffer_pool_pages_total',
- 'Innodb_buffer_pool_bytes_data',
- 'Innodb_buffer_pool_bytes_dirty',
- 'Innodb_buffer_pool_read_ahead',
- 'Innodb_buffer_pool_read_ahead_evicted',
- 'Innodb_buffer_pool_read_ahead_rnd',
- 'Innodb_buffer_pool_read_requests',
- 'Innodb_buffer_pool_write_requests',
- 'Innodb_buffer_pool_reads',
- 'Innodb_buffer_pool_wait_free',
- 'Qcache_hits',
- 'Qcache_lowmem_prunes',
- 'Qcache_inserts',
- 'Qcache_not_cached',
- 'Qcache_queries_in_cache',
- 'Qcache_free_memory',
- 'Qcache_free_blocks',
- 'Qcache_total_blocks',
- 'Key_blocks_unused',
- 'Key_blocks_used',
- 'Key_blocks_not_flushed',
- 'Key_read_requests',
- 'Key_write_requests',
- 'Key_reads',
- 'Key_writes',
- 'Open_files',
- 'Opened_files',
- 'Binlog_stmt_cache_disk_use',
- 'Binlog_stmt_cache_use',
- 'Connection_errors_accept',
- 'Connection_errors_internal',
- 'Connection_errors_max_connections',
- 'Connection_errors_peer_address',
- 'Connection_errors_select',
- 'Connection_errors_tcpwrap',
- 'wsrep_local_recv_queue',
- 'wsrep_local_send_queue',
- 'wsrep_received',
- 'wsrep_replicated',
- 'wsrep_received_bytes',
- 'wsrep_replicated_bytes',
- 'wsrep_local_bf_aborts',
- 'wsrep_local_cert_failures',
- 'wsrep_flow_control_paused_ns']
+ 'Bytes_received',
+ 'Bytes_sent',
+ 'Queries',
+ 'Questions',
+ 'Slow_queries',
+ 'Handler_commit',
+ 'Handler_delete',
+ 'Handler_prepare',
+ 'Handler_read_first',
+ 'Handler_read_key',
+ 'Handler_read_next',
+ 'Handler_read_prev',
+ 'Handler_read_rnd',
+ 'Handler_read_rnd_next',
+ 'Handler_rollback',
+ 'Handler_savepoint',
+ 'Handler_savepoint_rollback',
+ 'Handler_update',
+ 'Handler_write',
+ 'Table_locks_immediate',
+ 'Table_locks_waited',
+ 'Select_full_join',
+ 'Select_full_range_join',
+ 'Select_range',
+ 'Select_range_check',
+ 'Select_scan',
+ 'Sort_merge_passes',
+ 'Sort_range',
+ 'Sort_scan',
+ 'Created_tmp_disk_tables',
+ 'Created_tmp_files',
+ 'Created_tmp_tables',
+ 'Connections',
+ 'Aborted_connects',
+ 'Max_used_connections',
+ 'Binlog_cache_disk_use',
+ 'Binlog_cache_use',
+ 'Threads_connected',
+ 'Threads_created',
+ 'Threads_cached',
+ 'Threads_running',
+ 'Thread_cache_misses',
+ 'Innodb_data_read',
+ 'Innodb_data_written',
+ 'Innodb_data_reads',
+ 'Innodb_data_writes',
+ 'Innodb_data_fsyncs',
+ 'Innodb_data_pending_reads',
+ 'Innodb_data_pending_writes',
+ 'Innodb_data_pending_fsyncs',
+ 'Innodb_log_waits',
+ 'Innodb_log_write_requests',
+ 'Innodb_log_writes',
+ 'Innodb_os_log_fsyncs',
+ 'Innodb_os_log_pending_fsyncs',
+ 'Innodb_os_log_pending_writes',
+ 'Innodb_os_log_written',
+ 'Innodb_row_lock_current_waits',
+ 'Innodb_rows_inserted',
+ 'Innodb_rows_read',
+ 'Innodb_rows_updated',
+ 'Innodb_rows_deleted',
+ 'Innodb_buffer_pool_pages_data',
+ 'Innodb_buffer_pool_pages_dirty',
+ 'Innodb_buffer_pool_pages_free',
+ 'Innodb_buffer_pool_pages_flushed',
+ 'Innodb_buffer_pool_pages_misc',
+ 'Innodb_buffer_pool_pages_total',
+ 'Innodb_buffer_pool_bytes_data',
+ 'Innodb_buffer_pool_bytes_dirty',
+ 'Innodb_buffer_pool_read_ahead',
+ 'Innodb_buffer_pool_read_ahead_evicted',
+ 'Innodb_buffer_pool_read_ahead_rnd',
+ 'Innodb_buffer_pool_read_requests',
+ 'Innodb_buffer_pool_write_requests',
+ 'Innodb_buffer_pool_reads',
+ 'Innodb_buffer_pool_wait_free',
+ 'Qcache_hits',
+ 'Qcache_lowmem_prunes',
+ 'Qcache_inserts',
+ 'Qcache_not_cached',
+ 'Qcache_queries_in_cache',
+ 'Qcache_free_memory',
+ 'Qcache_free_blocks',
+ 'Qcache_total_blocks',
+ 'Key_blocks_unused',
+ 'Key_blocks_used',
+ 'Key_blocks_not_flushed',
+ 'Key_read_requests',
+ 'Key_write_requests',
+ 'Key_reads',
+ 'Key_writes',
+ 'Open_files',
+ 'Opened_files',
+ 'Binlog_stmt_cache_disk_use',
+ 'Binlog_stmt_cache_use',
+ 'Connection_errors_accept',
+ 'Connection_errors_internal',
+ 'Connection_errors_max_connections',
+ 'Connection_errors_peer_address',
+ 'Connection_errors_select',
+ 'Connection_errors_tcpwrap',
+ 'wsrep_local_recv_queue',
+ 'wsrep_local_send_queue',
+ 'wsrep_received',
+ 'wsrep_replicated',
+ 'wsrep_received_bytes',
+ 'wsrep_replicated_bytes',
+ 'wsrep_local_bf_aborts',
+ 'wsrep_local_cert_failures',
+ 'wsrep_flow_control_paused_ns',
+ 'Com_delete',
+ 'Com_insert',
+ 'Com_select',
+ 'Com_update',
+ 'Com_replace'
+]
+
def slave_seconds(value):
try:
@@ -142,22 +153,56 @@ SLAVE_STATS = [
('Slave_IO_Running', slave_running)
]
-ORDER = ['net',
- 'queries',
- 'handlers',
- 'table_locks',
- 'join_issues', 'sort_issues',
- 'tmp',
- 'connections', 'connection_errors',
- 'binlog_cache', 'binlog_stmt_cache',
- 'threads', 'thread_cache_misses',
- 'innodb_io', 'innodb_io_ops', 'innodb_io_pending_ops', 'innodb_log', 'innodb_os_log', 'innodb_os_log_io',
- 'innodb_cur_row_lock', 'innodb_rows', 'innodb_buffer_pool_pages', 'innodb_buffer_pool_bytes',
- 'innodb_buffer_pool_read_ahead', 'innodb_buffer_pool_reqs', 'innodb_buffer_pool_ops',
- 'qcache_ops', 'qcache', 'qcache_freemem', 'qcache_memblocks',
- 'key_blocks', 'key_requests', 'key_disk_ops',
- 'files', 'files_rate', 'slave_behind', 'slave_status',
- 'galera_writesets', 'galera_bytes', 'galera_queue', 'galera_conflicts', 'galera_flow_control']
+VARIABLES = [
+ 'max_connections'
+]
+
+ORDER = [
+ 'net',
+ 'queries',
+ 'queries_type',
+ 'handlers',
+ 'table_locks',
+ 'join_issues',
+ 'sort_issues',
+ 'tmp',
+ 'connections',
+ 'connections_active',
+ 'connection_errors',
+ 'binlog_cache',
+ 'binlog_stmt_cache',
+ 'threads',
+ 'thread_cache_misses',
+ 'innodb_io',
+ 'innodb_io_ops',
+ 'innodb_io_pending_ops',
+ 'innodb_log',
+ 'innodb_os_log',
+ 'innodb_os_log_io',
+ 'innodb_cur_row_lock',
+ 'innodb_rows',
+ 'innodb_buffer_pool_pages',
+ 'innodb_buffer_pool_bytes',
+ 'innodb_buffer_pool_read_ahead',
+ 'innodb_buffer_pool_reqs',
+ 'innodb_buffer_pool_ops',
+ 'qcache_ops',
+ 'qcache',
+ 'qcache_freemem',
+ 'qcache_memblocks',
+ 'key_blocks',
+ 'key_requests',
+ 'key_disk_ops',
+ 'files',
+ 'files_rate',
+ 'slave_behind',
+ 'slave_status',
+ 'galera_writesets',
+ 'galera_bytes',
+ 'galera_queue',
+ 'galera_conflicts',
+ 'galera_flow_control'
+]
CHARTS = {
'net': {
@@ -165,14 +210,27 @@ CHARTS = {
'lines': [
['Bytes_received', 'in', 'incremental', 8, 1024],
['Bytes_sent', 'out', 'incremental', -8, 1024]
- ]},
+ ]
+ },
'queries': {
'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
'lines': [
['Queries', 'queries', 'incremental'],
['Questions', 'questions', 'incremental'],
['Slow_queries', 'slow_queries', 'incremental']
- ]},
+ ]
+ },
+ 'queries_type': {
+ 'options': [None, 'mysql Query type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
+ 'lines': [
+ ['Com_select', 'select', 'incremental'],
+ ['Com_delete', 'delete', 'incremental'],
+ ['Com_update', 'update', 'incremental'],
+ ['Com_insert', 'insert', 'incremental'],
+ ['Qcache_hits', 'cache_hits', 'incremental'],
+ ['Com_replace', 'replace', 'incremental']
+ ]
+ },
'handlers': {
'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
'lines': [
@@ -190,13 +248,15 @@ CHARTS = {
['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'],
['Handler_update', 'update', 'incremental'],
['Handler_write', 'write', 'incremental']
- ]},
+ ]
+ },
'table_locks': {
'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
'lines': [
['Table_locks_immediate', 'immediate', 'incremental'],
['Table_locks_waited', 'waited', 'incremental', -1, 1]
- ]},
+ ]
+ },
'join_issues': {
'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
'lines': [
@@ -205,33 +265,46 @@ CHARTS = {
['Select_range', 'range', 'incremental'],
['Select_range_check', 'range_check', 'incremental'],
['Select_scan', 'scan', 'incremental']
- ]},
+ ]
+ },
'sort_issues': {
'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
'lines': [
['Sort_merge_passes', 'merge_passes', 'incremental'],
['Sort_range', 'range', 'incremental'],
['Sort_scan', 'scan', 'incremental']
- ]},
+ ]
+ },
'tmp': {
'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
'lines': [
['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
['Created_tmp_files', 'files', 'incremental'],
['Created_tmp_tables', 'tables', 'incremental']
- ]},
+ ]
+ },
'connections': {
'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
'lines': [
['Connections', 'all', 'incremental'],
['Aborted_connects', 'aborted', 'incremental']
- ]},
+ ]
+ },
+ 'connections_active': {
+ 'options': [None, 'mysql Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
+ 'lines': [
+ ['Threads_connected', 'active', 'absolute'],
+ ['max_connections', 'limit', 'absolute'],
+ ['Max_used_connections', 'max_active', 'absolute']
+ ]
+ },
'binlog_cache': {
'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
'lines': [
['Binlog_cache_disk_use', 'disk', 'incremental'],
['Binlog_cache_use', 'all', 'incremental']
- ]},
+ ]
+ },
'threads': {
'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'],
'lines': [
@@ -239,25 +312,29 @@ CHARTS = {
['Threads_created', 'created', 'incremental'],
['Threads_cached', 'cached', 'absolute', -1, 1],
['Threads_running', 'running', 'absolute'],
- ]},
+ ]
+ },
'thread_cache_misses': {
'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
'lines': [
['Thread_cache_misses', 'misses', 'absolute', 1, 100]
- ]},
+ ]
+ },
'innodb_io': {
'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'],
'lines': [
['Innodb_data_read', 'read', 'incremental', 1, 1024],
['Innodb_data_written', 'write', 'incremental', -1, 1024]
- ]},
+ ]
+ },
'innodb_io_ops': {
'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
'lines': [
['Innodb_data_reads', 'reads', 'incremental'],
['Innodb_data_writes', 'writes', 'incremental', -1, 1],
['Innodb_data_fsyncs', 'fsyncs', 'incremental']
- ]},
+ ]
+ },
'innodb_io_pending_ops': {
'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb',
'mysql.innodb_io_pending_ops', 'line'],
@@ -265,32 +342,37 @@ CHARTS = {
['Innodb_data_pending_reads', 'reads', 'absolute'],
['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1],
['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute']
- ]},
+ ]
+ },
'innodb_log': {
'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
'lines': [
['Innodb_log_waits', 'waits', 'incremental'],
['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
['Innodb_log_writes', 'writes', 'incremental', -1, 1],
- ]},
+ ]
+ },
'innodb_os_log': {
'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
'lines': [
['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'],
['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1],
- ]},
+ ]
+ },
'innodb_os_log_io': {
'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
'lines': [
['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
- ]},
+ ]
+ },
'innodb_cur_row_lock': {
'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb',
'mysql.innodb_cur_row_lock', 'area'],
'lines': [
['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
- ]},
+ ]
+ },
'innodb_rows': {
'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
'lines': [
@@ -298,7 +380,8 @@ CHARTS = {
['Innodb_rows_read', 'read', 'incremental', 1, 1],
['Innodb_rows_updated', 'updated', 'incremental', 1, 1],
['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1],
- ]},
+ ]
+ },
'innodb_buffer_pool_pages': {
'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb',
'mysql.innodb_buffer_pool_pages', 'line'],
@@ -309,13 +392,15 @@ CHARTS = {
['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1],
['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
['Innodb_buffer_pool_pages_total', 'total', 'absolute']
- ]},
+ ]
+ },
'innodb_buffer_pool_bytes': {
'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
'lines': [
['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
- ]},
+ ]
+ },
'innodb_buffer_pool_read_ahead': {
'options': [None, 'mysql InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb',
'mysql.innodb_buffer_pool_read_ahead', 'area'],
@@ -323,21 +408,24 @@ CHARTS = {
['Innodb_buffer_pool_read_ahead', 'all', 'incremental'],
['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1],
['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental']
- ]},
+ ]
+ },
'innodb_buffer_pool_reqs': {
'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
'mysql.innodb_buffer_pool_reqs', 'area'],
'lines': [
['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1]
- ]},
+ ]
+ },
'innodb_buffer_pool_ops': {
'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
'mysql.innodb_buffer_pool_ops', 'area'],
'lines': [
['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1]
- ]},
+ ]
+ },
'qcache_ops': {
'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
'lines': [
@@ -345,60 +433,70 @@ CHARTS = {
['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
['Qcache_inserts', 'inserts', 'incremental'],
['Qcache_not_cached', 'not cached', 'incremental', -1, 1]
- ]},
+ ]
+ },
'qcache': {
'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
'lines': [
['Qcache_queries_in_cache', 'queries', 'absolute']
- ]},
+ ]
+ },
'qcache_freemem': {
'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'],
'lines': [
['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
- ]},
+ ]
+ },
'qcache_memblocks': {
'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
'lines': [
['Qcache_free_blocks', 'free', 'absolute'],
['Qcache_total_blocks', 'total', 'absolute']
- ]},
+ ]
+ },
'key_blocks': {
'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
'lines': [
['Key_blocks_unused', 'unused', 'absolute'],
['Key_blocks_used', 'used', 'absolute', -1, 1],
['Key_blocks_not_flushed', 'not flushed', 'absolute']
- ]},
+ ]
+ },
'key_requests': {
'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
'lines': [
['Key_read_requests', 'reads', 'incremental'],
['Key_write_requests', 'writes', 'incremental', -1, 1]
- ]},
+ ]
+ },
'key_disk_ops': {
'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s',
'myisam', 'mysql.key_disk_ops', 'area'],
'lines': [
['Key_reads', 'reads', 'incremental'],
['Key_writes', 'writes', 'incremental', -1, 1]
- ]},
+ ]
+ },
'files': {
'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'],
'lines': [
['Open_files', 'files', 'absolute']
- ]},
+ ]
+ },
'files_rate': {
'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
'lines': [
['Opened_files', 'files', 'incremental']
- ]},
+ ]
+ },
'binlog_stmt_cache': {
'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog',
'mysql.binlog_stmt_cache', 'line'],
'lines': [
['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
['Binlog_stmt_cache_use', 'all', 'incremental']
- ]},
+ ]
+ },
'connection_errors': {
'options': [None, 'mysql Connection Errors', 'connections/s', 'connections',
'mysql.connection_errors', 'line'],
@@ -409,47 +507,55 @@ CHARTS = {
['Connection_errors_peer_address', 'peer_addr', 'incremental'],
['Connection_errors_select', 'select', 'incremental'],
['Connection_errors_tcpwrap', 'tcpwrap', 'incremental']
- ]},
+ ]
+ },
'slave_behind': {
'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
'lines': [
['Seconds_Behind_Master', 'seconds', 'absolute']
- ]},
+ ]
+ },
'slave_status': {
'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
'lines': [
['Slave_SQL_Running', 'sql_running', 'absolute'],
['Slave_IO_Running', 'io_running', 'absolute']
- ]},
+ ]
+ },
'galera_writesets': {
'options': [None, 'Replicated writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
'lines': [
['wsrep_received', 'rx', 'incremental'],
['wsrep_replicated', 'tx', 'incremental', -1, 1],
- ]},
+ ]
+ },
'galera_bytes': {
'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'],
'lines': [
['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
- ]},
+ ]
+ },
'galera_queue': {
'options': [None, 'Galera queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
'lines': [
['wsrep_local_recv_queue', 'rx', 'absolute'],
['wsrep_local_send_queue', 'tx', 'absolute', -1, 1],
- ]},
+ ]
+ },
'galera_conflicts': {
'options': [None, 'Replication conflicts', 'transactions', 'galera', 'mysql.galera_conflicts', 'area'],
'lines': [
['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'],
['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1],
- ]},
+ ]
+ },
'galera_flow_control': {
'options': [None, 'Flow control', 'millisec', 'galera', 'mysql.galera_flow_control', 'area'],
'lines': [
['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000],
- ]}
+ ]
+ }
}
@@ -458,7 +564,7 @@ class Service(MySQLService):
MySQLService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE)
+ self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE, variables=QUERY_VARIABLES)
def _get_data(self):
@@ -487,5 +593,10 @@ class Service(MySQLService):
else:
self.queries.pop('slave_status')
- return to_netdata or None
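+ # merge tracked server variables (currently only max_connections) into the metric set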
+ if 'variables' in raw_data:
+ variables = dict(raw_data['variables'][0])
+ for key in VARIABLES:
+ if key in variables:
+ to_netdata[key] = variables[key]
+ return to_netdata or None
diff --git a/conf.d/python.d/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf
index b5956a2c6..b5956a2c6 100644
--- a/conf.d/python.d/mysql.conf
+++ b/collectors/python.d.plugin/mysql/mysql.conf
diff --git a/collectors/python.d.plugin/nginx/Makefile.inc b/collectors/python.d.plugin/nginx/Makefile.inc
new file mode 100644
index 000000000..4636aa830
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nginx/nginx.chart.py
+dist_pythonconfig_DATA += nginx/nginx.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx/README.md nginx/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
new file mode 100644
index 000000000..007f45c7c
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -0,0 +1,45 @@
+# nginx
+
+This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
+
+**Requirements:**
+ * nginx with the 'ngx_http_stub_status_module' configured
+ * a 'location /stub_status' block
+
+Example nginx configuration can be found in 'python.d/nginx.conf'
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Active Connections by Status**
+ * reading
+ * writing
+ * waiting
+
+4. **Connections Rate** in connections/s
+ * accepts
+ * handled
+
+### configuration
+
+Needs only the `url` to the server's `stub_status` page.
+
+Here is an example for local server:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/stub_status'
+ retries : 10
+```
+
+Without configuration, the module attempts to connect to `http://localhost/stub_status`.
+
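+The `stub_status` page itself is plain text, so it is easy to inspect by hand. A minimal
+sketch of fetching and parsing it (the field names follow nginx's documented output; the
+parsing code is illustrative, not the module's):
+
+```python
+import re
+import urllib.request
+
+text = urllib.request.urlopen('http://localhost/stub_status').read().decode()
+active = int(re.search(r'Active connections:\s+(\d+)', text).group(1))
+# the third line carries the accepts/handled/requests counters
+accepts, handled, requests = map(int, text.splitlines()[2].split())
+print(active, accepts, handled, requests)
+```
+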
+---
diff --git a/python.d/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
index 2e4f0d1b5..09c6bbd37 100644
--- a/python.d/nginx.chart.py
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: nginx netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.UrlService import UrlService
@@ -25,28 +26,32 @@ CHARTS = {
'options': [None, 'nginx Active Connections', 'connections', 'active connections',
'nginx.connections', 'line'],
'lines': [
- ["active"]
- ]},
+ ['active']
+ ]
+ },
'requests': {
'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
'lines': [
- ["requests", None, 'incremental']
- ]},
+ ['requests', None, 'incremental']
+ ]
+ },
'connection_status': {
'options': [None, 'nginx Active Connections by Status', 'connections', 'status',
'nginx.connection_status', 'line'],
'lines': [
- ["reading"],
- ["writing"],
- ["waiting", "idle"]
- ]},
+ ['reading'],
+ ['writing'],
+ ['waiting', 'idle']
+ ]
+ },
'connect_rate': {
'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate',
'nginx.connect_rate', 'line'],
'lines': [
- ["accepts", "accepted", "incremental"],
- ["handled", None, "incremental"]
- ]}
+ ['accepts', 'accepted', 'incremental'],
+ ['handled', None, 'incremental']
+ ]
+ }
}
diff --git a/conf.d/python.d/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf
index 71c521066..71c521066 100644
--- a/conf.d/python.d/nginx.conf
+++ b/collectors/python.d.plugin/nginx/nginx.conf
diff --git a/collectors/python.d.plugin/nginx_plus/Makefile.inc b/collectors/python.d.plugin/nginx_plus/Makefile.inc
new file mode 100644
index 000000000..d3fdeaf2b
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nginx_plus/nginx_plus.chart.py
+dist_pythonconfig_DATA += nginx_plus/nginx_plus.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx_plus/README.md nginx_plus/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
new file mode 100644
index 000000000..43ec867a3
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -0,0 +1,125 @@
+# nginx_plus
+
+This module will monitor one or more nginx_plus servers depending on configuration.
+Servers can be either local or remote.
+
+Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
+
+It produces the following charts:
+
+1. **Requests total** in requests/s
+ * total
+
+2. **Requests current** in requests
+ * current
+
+3. **Connection Statistics** in connections/s
+ * accepted
+ * dropped
+
+4. **Workers Statistics** in workers
+ * idle
+ * active
+
+5. **SSL Handshakes** in handshakes/s
+ * successful
+ * failed
+
+6. **SSL Session Reuses** in sessions/s
+ * reused
+
+7. **SSL Memory Usage** in percent
+ * usage
+
+8. **Processes** in processes
+ * respawned
+
+For every server zone:
+
+1. **Processing** in requests
+ * processing
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Responses** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Traffic** in kilobits/s
+ * received
+ * sent
+
+For every upstream:
+
+1. **Peers Requests** in requests/s
+ * peer name (dimension per peer)
+
+2. **All Peers Responses** in responses/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+3. **Peer Responses** in requests/s (for every peer)
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Peers Connections** in active
+ * peer name (dimension per peer)
+
+5. **Peers Connections Usage** in percent
+ * peer name (dimension per peer)
+
+6. **All Peers Traffic** in KB
+ * received
+ * sent
+
+7. **Peer Traffic** in KB/s (for every peer)
+ * received
+ * sent
+
+8. **Peer Timings** in ms (for every peer)
+ * header
+ * response
+
+9. **Memory Usage** in percent
+ * usage
+
+10. **Peers Status** in state
+ * peer name (dimension per peer)
+
+11. **Peers Total Downtime** in seconds
+ * peer name (dimension per peer)
+
+For every cache:
+
+1. **Traffic** in KB
+ * served
+ * written
+ * bypass
+
+2. **Memory Usage** in percent
+ * usage
+
+### configuration
+
+Needs only the `url` to the server's `status` page.
+
+Here is an example for a local server:
+
+```yaml
+local:
+ url : 'http://localhost/status'
+```
+
+Without configuration, the module fails to start.
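+
+As a rough illustration of where one of the less obvious metrics comes from, the sketch below derives the SSL memory usage percentage from the slab allocator's page counts. The JSON layout (`slabs` → `SSL` → `pages`) is an assumption inferred from the metric names, not taken from the module's code:
+
+```python
+import json
+import urllib.request
+
+raw = urllib.request.urlopen('http://localhost/status').read().decode()
+status = json.loads(raw)
+
+# assumed layout: slabs.SSL.pages.{used,free}
+pages = status['slabs']['SSL']['pages']
+used, free = pages['used'], pages['free']
+
+# stored as percent * 100; the chart dimension divides by 100 again
+usage = int(round(float(used) / (used + free) * 10000))
+print('ssl_memory_usage: {0} (~{1}%)'.format(usage, usage / 100.0))
+```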
+
+---
diff --git a/python.d/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
index 509ddd380..1392f5a56 100644
--- a/python.d/nginx_plus.chart.py
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: nginx_plus netdata python.d module
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import re
@@ -21,63 +22,71 @@ priority = 60000
retries = 60
# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['requests_total', 'requests_current',
- 'connections_statistics', 'connections_workers',
- 'ssl_handshakes', 'ssl_session_reuses', 'ssl_memory_usage',
- 'processes']
+ORDER = [
+ 'requests_total',
+ 'requests_current',
+ 'connections_statistics',
+ 'connections_workers',
+ 'ssl_handshakes',
+ 'ssl_session_reuses',
+ 'ssl_memory_usage',
+ 'processes'
+]
CHARTS = {
'requests_total': {
- 'options': [None, 'Requests Total', 'requests/s',
- 'requests', 'nginx_plus.requests_total', 'line'],
+ 'options': [None, 'Requests Total', 'requests/s', 'requests', 'nginx_plus.requests_total', 'line'],
'lines': [
['requests_total', 'total', 'incremental']
- ]},
+ ]
+ },
'requests_current': {
- 'options': [None, 'Requests Current', 'requests',
- 'requests', 'nginx_plus.requests_current', 'line'],
+ 'options': [None, 'Requests Current', 'requests', 'requests', 'nginx_plus.requests_current', 'line'],
'lines': [
['requests_current', 'current']
- ]},
+ ]
+ },
'connections_statistics': {
'options': [None, 'Connections Statistics', 'connections/s',
'connections', 'nginx_plus.connections_statistics', 'stacked'],
'lines': [
['connections_accepted', 'accepted', 'incremental'],
['connections_dropped', 'dropped', 'incremental']
- ]},
+ ]
+ },
'connections_workers': {
'options': [None, 'Workers Statistics', 'workers',
'connections', 'nginx_plus.connections_workers', 'stacked'],
'lines': [
['connections_idle', 'idle'],
['connections_active', 'active']
- ]},
+ ]
+ },
'ssl_handshakes': {
- 'options': [None, 'SSL Handshakes', 'handshakes/s',
- 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
+ 'options': [None, 'SSL Handshakes', 'handshakes/s', 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
'lines': [
['ssl_handshakes', 'successful', 'incremental'],
['ssl_handshakes_failed', 'failed', 'incremental']
- ]},
+ ]
+ },
'ssl_session_reuses': {
- 'options': [None, 'Session Reuses', 'sessions/s',
- 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
+ 'options': [None, 'Session Reuses', 'sessions/s', 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
'lines': [
['ssl_session_reuses', 'reused', 'incremental']
- ]},
+ ]
+ },
'ssl_memory_usage': {
- 'options': [None, 'Memory Usage', '%',
- 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', '%', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
'lines': [
['ssl_memory_usage', 'usage', 'absolute', 1, 100]
- ]},
+ ]
+ },
'processes': {
- 'options': [None, 'Processes', 'processes',
- 'processes', 'nginx_plus.processes', 'line'],
+ 'options': [None, 'Processes', 'processes', 'processes', 'nginx_plus.processes', 'line'],
'lines': [
['processes_respawned', 'respawned']
- ]}
+ ]
+ }
}
@@ -86,17 +95,15 @@ def cache_charts(cache):
charts = OrderedDict()
charts['{0}_traffic'.format(cache.name)] = {
- 'options': [None, 'Traffic', 'KB', family,
- 'nginx_plus.cache_traffic', 'stacked'],
+ 'options': [None, 'Traffic', 'KB', family, 'nginx_plus.cache_traffic', 'stacked'],
'lines': [
['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
['_'.join([cache.name, 'miss_bytes']), 'bypass', 'absolute', 1, 1024]
- ]
+ ]
}
charts['{0}_memory_usage'.format(cache.name)] = {
- 'options': [None, 'Memory Usage', '%', family,
- 'nginx_plus.cache_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.cache_memory_usage', 'area'],
'lines': [
['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
]
@@ -160,8 +167,7 @@ def web_upstream_charts(wu):
# Requests
charts['web_upstream_{name}_requests'.format(name=wu.name)] = {
- 'options': [None, 'Peers Requests', 'requests/s', family,
- 'nginx_plus.web_upstream_requests', 'line'],
+ 'options': [None, 'Peers Requests', 'requests/s', family, 'nginx_plus.web_upstream_requests', 'line'],
'lines': dimensions('requests', 'incremental')
}
# Responses Codes
@@ -177,7 +183,7 @@ def web_upstream_charts(wu):
]
}
for peer in wu:
- charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.id)] = {
+ charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.server)] = {
'options': [None, 'Peer "{0}" Responses'.format(peer.real_server), 'responses/s', family,
'nginx_plus.web_upstream_peer_responses', 'stacked'],
'lines': [
@@ -190,26 +196,23 @@ def web_upstream_charts(wu):
}
# Connections
charts['web_upstream_{name}_connections'.format(name=wu.name)] = {
- 'options': [None, 'Peers Connections', 'active', family,
- 'nginx_plus.web_upstream_connections', 'line'],
+ 'options': [None, 'Peers Connections', 'active', family, 'nginx_plus.web_upstream_connections', 'line'],
'lines': dimensions('active')
}
charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
- 'options': [None, 'Peers Connections Usage', '%', family,
- 'nginx_plus.web_upstream_connections_usage', 'line'],
+ 'options': [None, 'Peers Connections Usage', '%', family, 'nginx_plus.web_upstream_connections_usage', 'line'],
'lines': dimensions('connections_usage', d=100)
}
# Traffic
charts['web_upstream_{0}_all_net'.format(wu.name)] = {
- 'options': [None, 'All Peers Traffic', 'kilobits/s', family,
- 'nginx_plus.web_upstream_all_net', 'area'],
+ 'options': [None, 'All Peers Traffic', 'kilobits/s', family, 'nginx_plus.web_upstream_all_net', 'area'],
'lines': [
['{0}_received'.format(wu.name), 'received', 'incremental', 1, 1000],
['{0}_sent'.format(wu.name), 'sent', 'incremental', -1, 1000]
]
}
for peer in wu:
- charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.id)] = {
+ charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.server)] = {
'options': [None, 'Peer "{0}" Traffic'.format(peer.real_server), 'kilobits/s', family,
'nginx_plus.web_upstream_peer_traffic', 'area'],
'lines': [
@@ -219,7 +222,7 @@ def web_upstream_charts(wu):
}
# Response Time
for peer in wu:
- charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.id)] = {
+ charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family,
'nginx_plus.web_upstream_peer_timings', 'line'],
'lines': [
@@ -229,30 +232,27 @@ def web_upstream_charts(wu):
}
# Memory Usage
charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
- 'options': [None, 'Memory Usage', '%', family,
- 'nginx_plus.web_upstream_memory_usage', 'area'],
+ 'options': [None, 'Memory Usage', '%', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
'lines': [
['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
]
}
# State
charts['web_upstream_{name}_status'.format(name=wu.name)] = {
- 'options': [None, 'Peers Status', 'state', family,
- 'nginx_plus.web_upstream_status', 'line'],
+ 'options': [None, 'Peers Status', 'state', family, 'nginx_plus.web_upstream_status', 'line'],
'lines': dimensions('state')
}
# Downtime
charts['web_upstream_{name}_downtime'.format(name=wu.name)] = {
- 'options': [None, 'Peers Downtime', 'seconds', family,
- 'nginx_plus.web_upstream_peer_downtime', 'line'],
+ 'options': [None, 'Peers Downtime', 'seconds', family, 'nginx_plus.web_upstream_peer_downtime', 'line'],
'lines': dimensions('downtime', d=1000)
}
return charts
-METRICS = dict(
- SERVER=[
+METRICS = {
+ 'SERVER': [
'processes.respawned',
'connections.accepted',
'connections.dropped',
@@ -266,7 +266,7 @@ METRICS = dict(
'slabs.SSL.pages.free',
'slabs.SSL.pages.used'
],
- WEB_ZONE=[
+ 'WEB_ZONE': [
'processing',
'requests',
'responses.1xx',
@@ -278,7 +278,7 @@ METRICS = dict(
'received',
'sent'
],
- WEB_UPSTREAM_PEER=[
+ 'WEB_UPSTREAM_PEER': [
'id',
'server',
'name',
@@ -297,7 +297,7 @@ METRICS = dict(
'received',
'downtime'
],
- WEB_UPSTREAM_SUMMARY=[
+ 'WEB_UPSTREAM_SUMMARY': [
'responses.1xx',
'responses.2xx',
'responses.3xx',
@@ -306,13 +306,13 @@ METRICS = dict(
'sent',
'received'
],
- CACHE=[
+ 'CACHE': [
'hit.bytes', # served
'miss.bytes_written', # written
'miss.bytes' # bypass
]
-)
+}
BAD_SYMBOLS = re.compile(r'[:/.-]+')
@@ -373,6 +373,7 @@ class WebUpstream:
return peer
def peers_stats(self, peers):
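+        # index the freshly fetched peers by their numeric id so the
+        # stored peer objects can be matched against the live stats below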
+ peers = {int(peer['id']): peer for peer in peers}
data = dict()
for peer in self.peers.values():
if not peer.active:
diff --git a/conf.d/python.d/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
index 7b5c8f43f..7b5c8f43f 100644
--- a/conf.d/python.d/nginx_plus.conf
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
diff --git a/collectors/python.d.plugin/nsd/Makefile.inc b/collectors/python.d.plugin/nsd/Makefile.inc
new file mode 100644
index 000000000..58e9fd67d
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nsd/nsd.chart.py
+dist_pythonconfig_DATA += nsd/nsd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nsd/README.md nsd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
new file mode 100644
index 000000000..02c302f41
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -0,0 +1,54 @@
+# nsd
+
+Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
+
+**Requirements:**
+ * Version of `nsd` must be 4.0+
+ * Netdata must have permissions to run `nsd-control stats_noreset`
+
+It produces:
+
+1. **Queries**
+ * queries
+
+2. **Zones**
+ * master
+ * slave
+
+3. **Protocol**
+ * udp
+ * udp6
+ * tcp
+ * tcp6
+
+4. **Query Type**
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * HINFO
+ * MX
+ * NAPTR
+ * TXT
+ * AAAA
+ * SRV
+ * ANY
+
+5. **Transfer**
+ * NOTIFY
+ * AXFR
+
+6. **Return Code**
+ * NOERROR
+ * FORMERR
+ * SERVFAIL
+ * NXDOMAIN
+ * NOTIMP
+ * REFUSED
+ * YXDOMAIN
+
+Configuration is not needed.
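+
+For reference, `nsd-control stats_noreset` prints one `key=value` pair per line (e.g. `num.queries=123`). A minimal standalone sketch of the parsing — the regex is the module's own; the dot-to-underscore renaming is shown for illustration, matching the internal dimension ids used by the charts:
+
+```python
+import re
+import subprocess
+
+STAT_REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+
+output = subprocess.check_output(['nsd-control', 'stats_noreset']).decode()
+stats = {
+    # 'num.queries' -> 'num_queries'
+    match.group(1).replace('.', '_'): int(match.group(2))
+    for match in STAT_REGEX.finditer(output)
+}
+print(stats.get('num_queries'))
+```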
+
+---
diff --git a/python.d/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
index 499dfda2e..d713f46bd 100644
--- a/python.d/nsd.chart.py
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: NSD `nsd-control stats_noreset` netdata python.d module
# Author: <383c57 at gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
import re
@@ -16,27 +17,29 @@ ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
CHARTS = {
'queries': {
- 'options': [
- None, "queries", 'queries/s', 'queries', 'nsd.queries', 'line'],
+ 'options': [None, 'queries', 'queries/s', 'queries', 'nsd.queries', 'line'],
'lines': [
- ['num_queries', 'queries', 'incremental'],]},
+ ['num_queries', 'queries', 'incremental']
+ ]
+ },
'zones': {
- 'options': [
- None, "zones", 'zones', 'zones', 'nsd.zones', 'stacked'],
+ 'options': [None, 'zones', 'zones', 'zones', 'nsd.zones', 'stacked'],
'lines': [
['zone_master', 'master', 'absolute'],
- ['zone_slave', 'slave', 'absolute'],]},
+ ['zone_slave', 'slave', 'absolute']
+ ]
+ },
'protocol': {
- 'options': [
- None, "protocol", 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
+ 'options': [None, 'protocol', 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
'lines': [
['num_udp', 'udp', 'incremental'],
['num_udp6', 'udp6', 'incremental'],
['num_tcp', 'tcp', 'incremental'],
- ['num_tcp6', 'tcp6', 'incremental'],]},
+ ['num_tcp6', 'tcp6', 'incremental']
+ ]
+ },
'type': {
- 'options': [
- None, "query type", 'queries/s', 'query type', 'nsd.type', 'stacked'],
+ 'options': [None, 'query type', 'queries/s', 'query type', 'nsd.type', 'stacked'],
'lines': [
['num_type_A', 'A', 'incremental'],
['num_type_NS', 'NS', 'incremental'],
@@ -49,16 +52,18 @@ CHARTS = {
['num_type_TXT', 'TXT', 'incremental'],
['num_type_AAAA', 'AAAA', 'incremental'],
['num_type_SRV', 'SRV', 'incremental'],
- ['num_type_TYPE255', 'ANY', 'incremental'],]},
+ ['num_type_TYPE255', 'ANY', 'incremental']
+ ]
+ },
'transfer': {
- 'options': [
- None, "transfer", 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
+ 'options': [None, 'transfer', 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
'lines': [
['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
- ['num_type_TYPE252', 'AXFR', 'incremental'],]},
+ ['num_type_TYPE252', 'AXFR', 'incremental']
+ ]
+ },
'rcode': {
- 'options': [
- None, "return code", 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
+ 'options': [None, 'return code', 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
'lines': [
['num_rcode_NOERROR', 'NOERROR', 'incremental'],
['num_rcode_FORMERR', 'FORMERR', 'incremental'],
@@ -66,7 +71,9 @@ CHARTS = {
['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
['num_rcode_REFUSED', 'REFUSED', 'incremental'],
- ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental'],]}
+ ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental']
+ ]
+ }
}
@@ -74,7 +81,7 @@ class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(
self, configuration=configuration, name=name)
- self.command = "nsd-control stats_noreset"
+ self.command = 'nsd-control stats_noreset'
self.order = ORDER
self.definitions = CHARTS
self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
diff --git a/conf.d/python.d/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf
index 078e97216..078e97216 100644
--- a/conf.d/python.d/nsd.conf
+++ b/collectors/python.d.plugin/nsd/nsd.conf
diff --git a/collectors/python.d.plugin/ntpd/Makefile.inc b/collectors/python.d.plugin/ntpd/Makefile.inc
new file mode 100644
index 000000000..81210ebab
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ntpd/ntpd.chart.py
+dist_pythonconfig_DATA += ntpd/ntpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ntpd/README.md ntpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
new file mode 100644
index 000000000..b0fa17fde
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -0,0 +1,71 @@
+# ntpd
+
+Module monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+
+**Requirements:**
+ * Version: `NTPv4`
+ * Local interrogation allowed in `/etc/ntp.conf` (default):
+
+```
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+```
+
+It produces:
+
+1. system
+ * offset
+ * jitter
+ * frequency
+ * delay
+ * dispersion
+ * stratum
+ * tc
+ * precision
+
+2. peers
+ * offset
+ * delay
+ * dispersion
+ * jitter
+ * rootdelay
+ * rootdispersion
+ * stratum
+ * hmode
+ * pmode
+ * hpoll
+ * ppoll
+ * precision
+
+### configuration
+
+Sample:
+
+```yaml
+update_every: 10
+
+host: 'localhost'
+port: '123'
+show_peers: yes
+# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
+peer_filter: '(127\..*)|(192\.168\..*)'
+# check for new/changed peers every 60 updates
+peer_rescan: 60
+```
+
+Sample (multiple jobs):
+
+Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
+
+```yaml
+local:
+ host: 'localhost'
+
+otherhost:
+ host: 'otherhost'
+```
+
+If no configuration is given, the module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show charts for the configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
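+
+To give a feel for the protocol involved, here is a minimal sketch of an NTP mode-6 (control) READVAR query assembled by hand — a standalone illustration, not the module's code:
+
+```python
+import socket
+import struct
+
+# Control message header: LI/VN/mode, opcode, sequence, status,
+# association id, offset, count -- all big-endian.
+packet = struct.pack(
+    '!BBHHHHH',
+    (2 << 3) | 6,  # version 2, mode 6 (control)
+    2,             # opcode 2: READVAR
+    1,             # sequence number
+    0, 0, 0, 0     # status, assoc id (0 = system vars), offset, count
+)
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.settimeout(1)
+sock.sendto(packet, ('127.0.0.1', 123))
+reply, _ = sock.recvfrom(1024)
+sock.close()
+
+# The payload after the 12-byte header is ASCII "name=value, ..." text,
+# e.g. "offset=0.123, sys_jitter=0.456, stratum=2, ..."
+print(reply[12:].decode(errors='ignore'))
+```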
+
+---
diff --git a/python.d/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
index 05209da87..79d557c80 100644
--- a/python.d/ntpd.chart.py
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -2,6 +2,7 @@
# Description: ntpd netdata python.d module
# Author: Sven Mäder (rda0)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import struct
import re
@@ -56,108 +57,117 @@ CHARTS = {
'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'],
'lines': [
['offset', 'offset', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_jitter': {
'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'],
'lines': [
['sys_jitter', 'system', 'absolute', 1, PRECISION],
['clk_jitter', 'clock', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_frequency': {
'options': [None, 'Frequency offset relative to hardware clock', 'ppm', 'system', 'ntpd.sys_frequency', 'area'],
'lines': [
['frequency', 'frequency', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_wander': {
'options': [None, 'Clock frequency wander', 'ppm', 'system', 'ntpd.sys_wander', 'area'],
'lines': [
['clk_wander', 'clock', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_rootdelay': {
'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system',
'ntpd.sys_rootdelay', 'area'],
'lines': [
['rootdelay', 'delay', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_rootdisp': {
'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system',
'ntpd.sys_rootdisp', 'area'],
'lines': [
['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_stratum': {
'options': [None, 'Stratum (1-15)', 'stratum', 'system', 'ntpd.sys_stratum', 'line'],
'lines': [
['stratum', 'stratum', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_tc': {
'options': [None, 'Time constant and poll exponent (3-17)', 'log2 s', 'system', 'ntpd.sys_tc', 'line'],
'lines': [
['tc', 'current', 'absolute', 1, PRECISION],
['mintc', 'minimum', 'absolute', 1, PRECISION]
- ]},
+ ]
+ },
'sys_precision': {
'options': [None, 'Precision', 'log2 s', 'system', 'ntpd.sys_precision', 'line'],
'lines': [
['precision', 'precision', 'absolute', 1, PRECISION]
- ]}
+ ]
+ }
}
PEER_CHARTS = {
'peer_offset': {
'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_delay': {
'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_dispersion': {
'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_jitter': {
'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_xleave': {
'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_rootdelay': {
'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers',
'ntpd.peer_rootdelay', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_rootdisp': {
'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'peers',
'ntpd.peer_rootdisp', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_stratum': {
'options': [None, 'Stratum (1-15)', 'stratum', 'peers', 'ntpd.peer_stratum', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_hmode': {
'options': [None, 'Host mode (1-6)', 'hmode', 'peers', 'ntpd.peer_hmode', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_pmode': {
'options': [None, 'Peer mode (1-5)', 'pmode', 'peers', 'ntpd.peer_pmode', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_hpoll': {
'options': [None, 'Host poll exponent', 'log2 s', 'peers', 'ntpd.peer_hpoll', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_ppoll': {
'options': [None, 'Peer poll exponent', 'log2 s', 'peers', 'ntpd.peer_ppoll', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'peer_precision': {
'options': [None, 'Precision', 'log2 s', 'peers', 'ntpd.peer_precision', 'line'],
- 'lines': [
- ]}
+ 'lines': []
+ }
}
diff --git a/conf.d/python.d/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf
index 7adc4074b..7adc4074b 100644
--- a/conf.d/python.d/ntpd.conf
+++ b/collectors/python.d.plugin/ntpd/ntpd.conf
diff --git a/collectors/python.d.plugin/ovpn_status_log/Makefile.inc b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
new file mode 100644
index 000000000..1fbc506d6
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ovpn_status_log/ovpn_status_log.chart.py
+dist_pythonconfig_DATA += ovpn_status_log/ovpn_status_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ovpn_status_log/README.md ovpn_status_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
new file mode 100644
index 000000000..be1ea279e
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -0,0 +1,32 @@
+# ovpn_status_log
+
+Module monitors the openvpn-status log file.
+
+**Requirements:**
+
+ * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
+ so that multiple instances do not overwrite each other's output files.
+
+ * Make sure NETDATA USER CAN READ openvpn-status.log
+
+ * The `update_every` interval MUST MATCH the interval at which OpenVPN writes the operational status to the log file.
+
+It produces:
+
+1. **Users** OpenVPN active users
+ * users
+
+2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s
+ * in
+ * out
+
+### configuration
+
+Sample:
+
+```yaml
+default:
+ log_path : '/var/log/openvpn-status.log'
+```
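+
+The heavy lifting is a regular expression over the per-client lines of the status file. A condensed standalone sketch using the module's own TLS-mode regex:
+
+```python
+import re
+
+# "<real address> <bytes received> <bytes sent>" after normalizing commas
+TLS_REGEX = re.compile(
+    r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)'
+)
+
+data = {'users': 0, 'bytes_in': 0, 'bytes_out': 0}
+with open('/var/log/openvpn-status.log') as status_log:
+    for row in status_log:
+        columns = row.split(',') if ',' in row else row.split()
+        if 'UNDEF' in columns:
+            continue  # client connected but not yet authenticated
+        match = TLS_REGEX.search(' '.join(columns))
+        if match:
+            data['users'] += 1
+            data['bytes_in'] += int(match.group('bytes_in'))
+            data['bytes_out'] += int(match.group('bytes_out'))
+print(data)
+```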
+
+---
diff --git a/python.d/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
index 519c77fa3..64d7062d9 100644
--- a/python.d/ovpn_status_log.chart.py
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: openvpn status log netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from re import compile as r_compile
@@ -16,15 +17,19 @@ CHARTS = {
'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
'lines': [
['users', None, 'absolute'],
- ]},
+ ]
+ },
'traffic': {
'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
'lines': [
['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
- ]},
-
+ ]
+ }
}
+TLS_REGEX = r_compile(r'(?:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
+STATIC_KEY_REGEX = r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)')
+
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
@@ -32,8 +37,10 @@ class Service(SimpleService):
self.order = ORDER
self.definitions = CHARTS
self.log_path = self.configuration.get('log_path')
- self.regex = dict(tls=r_compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d+)? (?P<bytes_in>\d+) (?P<bytes_out>\d+)'),
- static_key=r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'))
+ self.regex = {
+ 'tls': TLS_REGEX,
+ 'static_key': STATIC_KEY_REGEX
+ }
def check(self):
if not (self.log_path and isinstance(self.log_path, str)):
@@ -57,7 +64,7 @@ class Service(SimpleService):
break
if found:
return True
- self.error("Failed to parse ovpenvpn log file")
+        self.error('Failed to parse openvpn log file')
return False
def _get_raw_data(self):
@@ -107,8 +114,12 @@ class Service(SimpleService):
data = dict(users=0, bytes_in=0, bytes_out=0)
for row in raw_data:
- row = ' '.join(row.split(',')) if ',' in row else ' '.join(row.split())
- match = self.regex['tls'].search(row)
+ columns = row.split(',') if ',' in row else row.split()
+ if 'UNDEF' in columns:
+ # see https://openvpn.net/archive/openvpn-users/2004-08/msg00116.html
+ continue
+
+ match = self.regex['tls'].search(' '.join(columns))
if match:
match = match.groupdict()
data['users'] += 1
diff --git a/conf.d/python.d/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
index 907f014f5..6fb35a530 100644
--- a/conf.d/python.d/ovpn_status_log.conf
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
@@ -88,8 +88,12 @@
# # ps -C openvpn -o command=
# /usr/sbin/openvpn --daemon ovpn-server --status /run/openvpn/server.status 10 --cd /etc/openvpn --config /etc/openvpn/server.conf
#
+# 4. Confirm status is configured in your OpenVPN configuration.
+# * Open OpenVPN config in an editor (e.g. sudo nano /etc/openvpn/default.conf)
+# * Confirm status is enabled with below:
+# status /var/log/openvpn-status.log
#
#default:
# log_path: '/var/log/openvpn-status.log'
#
-# ---------------------------------------------------------------------- \ No newline at end of file
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/phpfpm/Makefile.inc b/collectors/python.d.plugin/phpfpm/Makefile.inc
new file mode 100644
index 000000000..ff312fe18
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += phpfpm/phpfpm.chart.py
+dist_pythonconfig_DATA += phpfpm/phpfpm.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc
+
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
new file mode 100644
index 000000000..66930463f
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -0,0 +1,40 @@
+# phpfpm
+
+This module will monitor one or more php-fpm instances depending on configuration.
+
+**Requirements:**
+ * php-fpm with the `status` page enabled
+ * access to the `status` page via a web server
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+ * maxActive
+ * idle
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Performance**
+ * reached
+ * slow
+
+### configuration
+
+Needs only the `url` to the server's `status` page.
+
+Here is an example for a local instance:
+
+```yaml
+update_every : 3
+priority : 90100
+
+local:
+ url : 'http://localhost/status'
+ retries : 10
+```
+
+Without configuration, the module attempts to connect to `http://localhost/status`.
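+
+When queried with `?json`, the status page returns JSON whose key names contain spaces. A minimal standalone sketch of reading the fields behind the charts above (URL assumed, not taken from the module):
+
+```python
+import json
+import urllib.request
+
+raw = urllib.request.urlopen('http://localhost/status?json').read().decode()
+status = json.loads(raw)
+
+print({
+    'active': status['active processes'],
+    'maxActive': status['max active processes'],
+    'idle': status['idle processes'],
+    'requests': status['accepted conn'],        # charted as incremental
+    'reached': status['max children reached'],
+    'slow': status['slow requests'],
+})
+```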
+
+---
diff --git a/python.d/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
index ea7a9a7e6..a3f0963fc 100644
--- a/python.d/phpfpm.chart.py
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description: PHP-FPM netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
import re
@@ -58,18 +60,21 @@ CHARTS = {
['active'],
['maxActive', 'max active'],
['idle']
- ]},
+ ]
+ },
'requests': {
'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
'lines': [
['requests', None, 'incremental']
- ]},
+ ]
+ },
'performance': {
'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
'lines': [
['reached', 'max children reached'],
['slow', 'slow requests']
- ]},
+ ]
+ },
'request_duration': {
'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration',
'line'],
@@ -77,21 +82,24 @@ CHARTS = {
['minReqDur', 'min', 'absolute', 1, 1000],
['maxReqDur', 'max', 'absolute', 1, 1000],
['avgReqDur', 'avg', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'request_cpu': {
'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
'lines': [
['minReqCpu', 'min'],
['maxReqCpu', 'max'],
['avgReqCpu', 'avg']
- ]},
+ ]
+ },
'request_mem': {
'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
'lines': [
['minReqMem', 'min', 'absolute', 1, 1024],
['maxReqMem', 'max', 'absolute', 1, 1024],
['avgReqMem', 'avg', 'absolute', 1, 1024]
- ]}
+ ]
+ }
}
diff --git a/conf.d/python.d/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf
index 08688e2fa..571eb9156 100644
--- a/conf.d/python.d/phpfpm.conf
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.conf
@@ -86,5 +86,5 @@ localipv4:
localipv6:
name : 'local'
- url : "http://::1/status?full&json"
+ url : "http://[::1]/status?full&json"
diff --git a/collectors/python.d.plugin/portcheck/Makefile.inc b/collectors/python.d.plugin/portcheck/Makefile.inc
new file mode 100644
index 000000000..76763f02f
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += portcheck/portcheck.chart.py
+dist_pythonconfig_DATA += portcheck/portcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += portcheck/README.md portcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
new file mode 100644
index 000000000..f1338d576
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -0,0 +1,35 @@
+# portcheck
+
+Module monitors a remote TCP service.
+
+The following charts are drawn per host:
+
+1. **Latency** ms
+ * Time required to connect to a TCP port.
+ Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Could not create socket: possible DNS problems
+ * Connection refused: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+
+### configuration
+
+```yaml
+server:
+ host: 'dns or ip' # required
+ port: 22 # required
+ timeout: 1 # optional
+ update_every: 1 # optional
+```
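+
+A roughly equivalent standalone check — a sketch under the same timeout semantics, not the module's code — looks like this:
+
+```python
+import socket
+import time
+
+def check_port(host, port, timeout=1):
+    """Return (state, latency in ms); latency is None unless successful."""
+    try:
+        infos = socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)
+    except socket.error:
+        return 'no connection', None  # e.g. DNS resolution failed
+    af, sock_type, proto, _, sa = infos[0]
+    sock = socket.socket(af, sock_type, proto)
+    sock.settimeout(timeout)
+    started = time.time()
+    try:
+        sock.connect(sa)
+    except socket.timeout:
+        return 'timeout', None
+    except socket.error:
+        return 'no connection', None
+    finally:
+        sock.close()
+    return 'success', (time.time() - started) * 1000
+
+print(check_port('example.com', 22))
+```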
+
+### notes
+
+ * The error chart is intended for alarms, badges or for access via API.
+ * A system/service/firewall might block netdata's access if a portscan or
+ similar is detected.
+ * Currently, the latency measurement has low accuracy and should be used as a reference only.
+
+---
diff --git a/python.d/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
index 0a312210d..e86f82544 100644
--- a/python.d/portcheck.chart.py
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: simple port check netdata python.d module
# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
import socket
@@ -36,7 +37,8 @@ CHARTS = {
[PORT_SUCCESS, 'success', 'absolute'],
[PORT_TIMEOUT, 'timeout', 'absolute'],
[PORT_FAILED, 'no connection', 'absolute']
- ]}
+ ]
+ }
}
@@ -56,13 +58,13 @@ class Service(SimpleService):
:return: boolean
"""
if self.host is None or self.port is None:
- self.error("Host or port missing")
+ self.error('Host or port missing')
return False
if not isinstance(self.port, int):
self.error('"port" is not an integer. Specify a numerical value, not service name.')
return False
- self.debug("Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s".format(
+ self.debug('Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s'.format(
host=self.host, port=self.port, update=self.update_every, timeout=self.timeout
))
# We will accept any (valid-ish) configuration, even if initial connection fails (a service might be down from
@@ -101,7 +103,7 @@ class Service(SimpleService):
return data
def _create_socket(self, socket_config):
- af, sock_type, proto, canon_name, sa = socket_config
+ af, sock_type, proto, _, sa = socket_config
try:
self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
sock = socket.socket(af, sock_type, proto)
@@ -119,7 +121,7 @@ class Service(SimpleService):
:return: dict
"""
- af, sock_type, proto, canon_name, sa = socket_config
+ af, _, proto, _, sa = socket_config
port = str(sa[1])
try:
self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port))
diff --git a/conf.d/python.d/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf
index b3dd8bd3f..b3dd8bd3f 100644
--- a/conf.d/python.d/portcheck.conf
+++ b/collectors/python.d.plugin/portcheck/portcheck.conf
diff --git a/collectors/python.d.plugin/postfix/Makefile.inc b/collectors/python.d.plugin/postfix/Makefile.inc
new file mode 100644
index 000000000..f4091b217
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postfix/postfix.chart.py
+dist_pythonconfig_DATA += postfix/postfix.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
new file mode 100644
index 000000000..77c95ff44
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -0,0 +1,15 @@
+# postfix
+
+Simple module executing `postqueue -p` to grab the postfix queue.
+
+It produces only two charts:
+
+1. **Postfix Queue Emails**
+ * emails
+
+2. **Postfix Queue Emails Size** in KB
+ * size
+
+Configuration is not needed.
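+
+The module only reads the summary line that `postqueue -p` prints last. A minimal standalone sketch of that parsing (illustrative, not the module's exact code):
+
+```python
+import subprocess
+
+# Last line is e.g. "-- 5 Kbytes in 3 Requests." or "Mail queue is empty"
+last = subprocess.check_output(['postqueue', '-p']).decode().strip().splitlines()[-1]
+
+if last.startswith('Mail queue is empty'):
+    data = {'emails': 0, 'size': 0}
+else:
+    parts = last.split()  # ['--', '5', 'Kbytes', 'in', '3', 'Requests.']
+    data = {'emails': int(parts[4]), 'size': int(parts[1])}
+print(data)
+```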
+
+---
diff --git a/python.d/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
index a2129e4be..bdbd0feea 100644
--- a/python.d/postfix.chart.py
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: postfix netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.ExecutableService import ExecutableService
@@ -14,22 +15,24 @@ ORDER = ['qemails', 'qsize']
CHARTS = {
'qemails': {
- 'options': [None, "Postfix Queue Emails", "emails", 'queue', 'postfix.qemails', 'line'],
+ 'options': [None, 'Postfix Queue Emails', 'emails', 'queue', 'postfix.qemails', 'line'],
'lines': [
['emails', None, 'absolute']
- ]},
+ ]
+ },
'qsize': {
- 'options': [None, "Postfix Queue Emails Size", "emails size in KB", 'queue', 'postfix.qsize', 'area'],
+ 'options': [None, 'Postfix Queue Emails Size', 'emails size in KB', 'queue', 'postfix.qsize', 'area'],
'lines': [
- ["size", None, 'absolute']
- ]}
+ ['size', None, 'absolute']
+ ]
+ }
}
class Service(ExecutableService):
def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name)
- self.command = "postqueue -p"
+ self.command = 'postqueue -p'
self.order = ORDER
self.definitions = CHARTS
diff --git a/conf.d/python.d/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
index e0d5a5f83..e0d5a5f83 100644
--- a/conf.d/python.d/postfix.conf
+++ b/collectors/python.d.plugin/postfix/postfix.conf
diff --git a/collectors/python.d.plugin/postgres/Makefile.inc b/collectors/python.d.plugin/postgres/Makefile.inc
new file mode 100644
index 000000000..91a185cb9
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postgres/postgres.chart.py
+dist_pythonconfig_DATA += postgres/postgres.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postgres/README.md postgres/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
new file mode 100644
index 000000000..e7b108d36
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -0,0 +1,68 @@
+# postgres
+
+Module monitors one or more postgres servers.
+
+**Requirements:**
+
+ * `python-psycopg2` package. You have to install it manually.
+
+The following charts are drawn:
+
+1. **Database size** MB
+ * size
+
+2. **Current Backend Processes** processes
+ * active
+
+3. **Write-Ahead Logging Statistics** files/s
+ * total
+ * ready
+ * done
+
+4. **Checkpoints** writes/s
+ * scheduled
+ * requested
+
+5. **Current connections to db** count
+ * connections
+
+6. **Tuples returned from db** tuples/s
+ * sequential
+ * bitmap
+
+7. **Tuple reads from db** reads/s
+ * disk
+ * cache
+
+8. **Transactions on db** transactions/s
+ * committed
+ * rolled back
+
+9. **Tuples written to db** writes/s
+ * inserted
+ * updated
+ * deleted
+ * conflicts
+
+10. **Locks on db** count per type
+ * locks
+
+### configuration
+
+```yaml
+socket:
+ name : 'socket'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'tcp'
+ user : 'postgres'
+ database : 'postgres'
+ host : 'localhost'
+ port : 5432
+```
+
+When no configuration file is found, the module tries to connect to a TCP/IP socket at `localhost:5432`.
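+
+Since the collector is plain `psycopg2` plus SQL, any of its queries can be tried interactively. A minimal standalone sketch running the module's BACKENDS query (connection parameters match the `tcp` example above):
+
+```python
+import psycopg2
+
+conn = psycopg2.connect(user='postgres', dbname='postgres',
+                        host='localhost', port=5432)
+with conn.cursor() as cursor:
+    cursor.execute("""
+        SELECT
+          count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active,
+          (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_idle
+        FROM pg_stat_activity;
+    """)
+    backends_active, backends_idle = cursor.fetchone()
+conn.close()
+print('active={0} idle={1}'.format(backends_active, backends_idle))
+```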
+
+---
diff --git a/python.d/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
index 0522b1938..7f43877c3 100644
--- a/python.d/postgres.chart.py
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: example netdata python.d module
# Authors: facetoe, dangtranhoang
+# SPDX-License-Identifier: GPL-3.0-or-later
from copy import deepcopy
@@ -20,221 +21,310 @@ update_every = 1
priority = 60000
retries = 60
-METRICS = dict(
- DATABASE=['connections',
- 'xact_commit',
- 'xact_rollback',
- 'blks_read',
- 'blks_hit',
- 'tup_returned',
- 'tup_fetched',
- 'tup_inserted',
- 'tup_updated',
- 'tup_deleted',
- 'conflicts',
- 'temp_files',
- 'temp_bytes',
- 'size'],
- BACKENDS=['backends_active',
- 'backends_idle'],
- INDEX_STATS=['index_count',
- 'index_size'],
- TABLE_STATS=['table_size',
- 'table_count'],
- WAL=['written_wal',
- 'recycled_wal',
- 'total_wal'],
- WAL_WRITES=['wal_writes'],
- ARCHIVE=['ready_count',
- 'done_count',
- 'file_count'],
- BGWRITER=['checkpoint_scheduled',
- 'checkpoint_requested',
- 'buffers_checkpoint',
- 'buffers_clean',
- 'maxwritten_clean',
- 'buffers_backend',
- 'buffers_alloc',
- 'buffers_backend_fsync'],
- LOCKS=['ExclusiveLock',
- 'RowShareLock',
- 'SIReadLock',
- 'ShareUpdateExclusiveLock',
- 'AccessExclusiveLock',
- 'AccessShareLock',
- 'ShareRowExclusiveLock',
- 'ShareLock',
- 'RowExclusiveLock'],
- AUTOVACUUM=['analyze',
- 'vacuum_analyze',
- 'vacuum',
- 'vacuum_freeze',
- 'brin_summarize'],
- STANDBY_DELTA=['sent_delta',
- 'write_delta',
- 'flush_delta',
- 'replay_delta'],
- REPSLOT_FILES=['replslot_wal_keep',
- 'replslot_files']
-
-)
-
-QUERIES = dict(
- WAL="""
+METRICS = {
+ 'DATABASE': [
+ 'connections',
+ 'xact_commit',
+ 'xact_rollback',
+ 'blks_read',
+ 'blks_hit',
+ 'tup_returned',
+ 'tup_fetched',
+ 'tup_inserted',
+ 'tup_updated',
+ 'tup_deleted',
+ 'conflicts',
+ 'temp_files',
+ 'temp_bytes',
+ 'size'
+ ],
+ 'BACKENDS': [
+ 'backends_active',
+ 'backends_idle'
+ ],
+ 'INDEX_STATS': [
+ 'index_count',
+ 'index_size'
+ ],
+ 'TABLE_STATS': [
+ 'table_size',
+ 'table_count'
+ ],
+ 'WAL': [
+ 'written_wal',
+ 'recycled_wal',
+ 'total_wal'
+ ],
+ 'WAL_WRITES': [
+ 'wal_writes'
+ ],
+ 'ARCHIVE': [
+ 'ready_count',
+ 'done_count',
+ 'file_count'
+ ],
+ 'BGWRITER': [
+ 'checkpoint_scheduled',
+ 'checkpoint_requested',
+ 'buffers_checkpoint',
+ 'buffers_clean',
+ 'maxwritten_clean',
+ 'buffers_backend',
+ 'buffers_alloc',
+ 'buffers_backend_fsync'
+ ],
+ 'LOCKS': [
+ 'ExclusiveLock',
+ 'RowShareLock',
+ 'SIReadLock',
+ 'ShareUpdateExclusiveLock',
+ 'AccessExclusiveLock',
+ 'AccessShareLock',
+ 'ShareRowExclusiveLock',
+ 'ShareLock',
+ 'RowExclusiveLock'
+ ],
+ 'AUTOVACUUM': [
+ 'analyze',
+ 'vacuum_analyze',
+ 'vacuum',
+ 'vacuum_freeze',
+ 'brin_summarize'
+ ],
+ 'STANDBY_DELTA': [
+ 'sent_delta',
+ 'write_delta',
+ 'flush_delta',
+ 'replay_delta'
+ ],
+ 'REPSLOT_FILES': [
+ 'replslot_wal_keep',
+ 'replslot_files'
+ ]
+}
+
+QUERIES = {
+ 'WAL': """
SELECT
- count(*) as total_wal,
- count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
- count(*) FILTER (WHERE type = 'written') AS written_wal
+ count(*) as total_wal,
+ count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+ count(*) FILTER (WHERE type = 'written') AS written_wal
FROM
- (SELECT wal.name,
- pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ),
- CASE WHEN wal.name > pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ) THEN 'recycled'
- ELSE 'written'
- END AS type
- FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name)
- WHERE name ~ '^[0-9A-F]{{24}}$'
- ORDER BY (pg_stat_file('pg_{0}/'||name)).modification, wal.name DESC) sub;
+ (SELECT
+ wal.name,
+ pg_{0}file_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_{0}_{1}()
+ END ),
+ CASE
+ WHEN wal.name > pg_{0}file_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_{0}_{1}()
+ END ) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{{24}}$'
+ ORDER BY
+ (pg_stat_file('pg_{0}/'||name)).modification,
+ wal.name DESC) sub;
""",
- ARCHIVE="""
+ 'ARCHIVE': """
SELECT
CAST(COUNT(*) AS INT) AS file_count,
- CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count,
- CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
FROM
pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file);
""",
- BACKENDS="""
+ 'BACKENDS': """
SELECT
- count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active,
- (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle' ) AS backends_idle
-FROM pg_stat_activity;
+ count(*) - (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_active,
+ (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_idle
+FROM pg_stat_activity;
""",
- TABLE_STATS="""
+ 'TABLE_STATS': """
SELECT
- ((sum(relpages) * 8) * 1024) AS table_size,
- count(1) AS table_count
+ ((sum(relpages) * 8) * 1024) AS table_size,
+ count(1) AS table_count
FROM pg_class
WHERE relkind IN ('r', 't');
""",
- INDEX_STATS="""
+ 'INDEX_STATS': """
SELECT
- ((sum(relpages) * 8) * 1024) AS index_size,
- count(1) AS index_count
+ ((sum(relpages) * 8) * 1024) AS index_size,
+ count(1) AS index_count
FROM pg_class
-WHERE relkind = 'i';""",
- DATABASE="""
+WHERE relkind = 'i';
+""",
+ 'DATABASE': """
SELECT
- datname AS database_name,
- numbackends AS connections,
- xact_commit AS xact_commit,
- xact_rollback AS xact_rollback,
- blks_read AS blks_read,
- blks_hit AS blks_hit,
- tup_returned AS tup_returned,
- tup_fetched AS tup_fetched,
- tup_inserted AS tup_inserted,
- tup_updated AS tup_updated,
- tup_deleted AS tup_deleted,
- conflicts AS conflicts,
- pg_database_size(datname) AS size,
- temp_files AS temp_files,
- temp_bytes AS temp_bytes
+ datname AS database_name,
+ numbackends AS connections,
+ xact_commit AS xact_commit,
+ xact_rollback AS xact_rollback,
+ blks_read AS blks_read,
+ blks_hit AS blks_hit,
+ tup_returned AS tup_returned,
+ tup_fetched AS tup_fetched,
+ tup_inserted AS tup_inserted,
+ tup_updated AS tup_updated,
+ tup_deleted AS tup_deleted,
+ conflicts AS conflicts,
+ pg_database_size(datname) AS size,
+ temp_files AS temp_files,
+ temp_bytes AS temp_bytes
FROM pg_stat_database
-WHERE datname IN %(databases)s
-;
+WHERE datname IN %(databases)s ;
""",
- BGWRITER="""
+ 'BGWRITER': """
SELECT
- checkpoints_timed AS checkpoint_scheduled,
- checkpoints_req AS checkpoint_requested,
- buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint,
- buffers_clean * current_setting('block_size')::numeric buffers_clean,
- maxwritten_clean,
- buffers_backend * current_setting('block_size')::numeric buffers_backend,
- buffers_alloc * current_setting('block_size')::numeric buffers_alloc,
- buffers_backend_fsync
+ checkpoints_timed AS checkpoint_scheduled,
+ checkpoints_req AS checkpoint_requested,
+ buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint,
+ buffers_clean * current_setting('block_size')::numeric buffers_clean,
+ maxwritten_clean,
+ buffers_backend * current_setting('block_size')::numeric buffers_backend,
+ buffers_alloc * current_setting('block_size')::numeric buffers_alloc,
+ buffers_backend_fsync
FROM pg_stat_bgwriter;
""",
- LOCKS="""
+ 'LOCKS': """
SELECT
- pg_database.datname as database_name,
- mode,
- count(mode) AS locks_count
+ pg_database.datname as database_name,
+ mode,
+ count(mode) AS locks_count
FROM pg_locks
- INNER JOIN pg_database ON pg_database.oid = pg_locks.database
+INNER JOIN pg_database
+ ON pg_database.oid = pg_locks.database
GROUP BY datname, mode
ORDER BY datname, mode;
""",
- FIND_DATABASES="""
-SELECT datname
+ 'FIND_DATABASES': """
+SELECT
+ datname
FROM pg_stat_database
-WHERE has_database_privilege((SELECT current_user), datname, 'connect')
-AND NOT datname ~* '^template\d+';
+WHERE
+ has_database_privilege(
+ (SELECT current_user), datname, 'connect')
+ AND NOT datname ~* '^template\d ';
""",
- FIND_STANDBY="""
-SELECT application_name
+ 'FIND_STANDBY': """
+SELECT
+ application_name
FROM pg_stat_replication
WHERE application_name IS NOT NULL
GROUP BY application_name;
""",
- FIND_REPLICATION_SLOT="""
+ 'FIND_REPLICATION_SLOT': """
SELECT slot_name
FROM pg_replication_slots;
""",
- STANDBY_DELTA="""
-SELECT application_name,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , sent_{1}) AS sent_delta,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , write_{1}) AS write_delta,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , flush_{1}) AS flush_delta,
- pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , replay_{1}) AS replay_delta
+ 'STANDBY_DELTA': """
+SELECT
+ application_name,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ sent_{1}) AS sent_delta,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ write_{1}) AS write_delta,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ flush_{1}) AS flush_delta,
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ replay_{1}) AS replay_delta
FROM pg_stat_replication
WHERE application_name IS NOT NULL;
""",
- REPSLOT_FILES="""
+ 'REPSLOT_FILES': """
WITH wal_size AS (
- SELECT current_setting('wal_block_size')::INT * setting::INT AS val
- FROM pg_settings
- WHERE name = 'wal_segment_size'
-)
-SELECT slot_name, slot_type, replslot_wal_keep, count(slot_file) AS replslot_files
-FROM (
- SELECT slot.slot_name, CASE WHEN slot_file <> 'state' THEN 1 END AS slot_file , slot_type,
- COALESCE (floor((pg_wal_lsn_diff (pg_current_wal_lsn (),
- slot.restart_lsn) - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)),
- 0) AS replslot_wal_keep
- FROM pg_replication_slots slot
- LEFT JOIN (
- SELECT slot2.slot_name,
- pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
- FROM pg_replication_slots slot2
- ) files (slot_name, slot_file)
- ON slot.slot_name = files.slot_name
- CROSS JOIN wal_size s) AS d
-GROUP BY slot_name, slot_type, replslot_wal_keep;
+ SELECT
+ current_setting('wal_block_size')::INT * setting::INT AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn)
+ - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
""",
- IF_SUPERUSER="""
+ 'IF_SUPERUSER': """
SELECT current_setting('is_superuser') = 'on' AS is_superuser;
""",
- DETECT_SERVER_VERSION="""
+ 'DETECT_SERVER_VERSION': """
SHOW server_version_num;
""",
- AUTOVACUUM="""
+ 'AUTOVACUUM': """
SELECT
- count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
- count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
- count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%'
- AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%'
- AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum,
- count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze,
- count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize
+ count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%'
+ AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%'
+ AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum,
+ count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize
FROM pg_stat_activity
WHERE query NOT LIKE '%%pg_stat_activity%%';
""",
- DIFF_LSN="""
-SELECT pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END, '0/0') as wal_writes ;
+ 'DIFF_LSN': """
+SELECT
+ pg_{0}_{1}_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_{0}_receive_{1}()
+ ELSE pg_current_{0}_{1}()
+ END,
+ '0/0') as wal_writes ;
"""
-
-)
+}
QUERY_STATS = {
@@ -243,11 +333,34 @@ QUERY_STATS = {
QUERIES['LOCKS']: METRICS['LOCKS']
}
-ORDER = ['db_stat_temp_files', 'db_stat_temp_bytes', 'db_stat_blks', 'db_stat_tuple_returned', 'db_stat_tuple_write',
- 'db_stat_transactions','db_stat_connections', 'database_size', 'backend_process', 'index_count', 'index_size',
- 'table_count', 'table_size', 'wal', 'wal_writes', 'archive_wal', 'checkpointer', 'stat_bgwriter_alloc', 'stat_bgwriter_checkpoint',
- 'stat_bgwriter_backend', 'stat_bgwriter_backend_fsync' , 'stat_bgwriter_bgwriter', 'stat_bgwriter_maxwritten',
- 'replication_slot', 'standby_delta', 'autovacuum']
+ORDER = [
+ 'db_stat_temp_files',
+ 'db_stat_temp_bytes',
+ 'db_stat_blks',
+ 'db_stat_tuple_returned',
+ 'db_stat_tuple_write',
+ 'db_stat_transactions',
+ 'db_stat_connections',
+ 'database_size',
+ 'backend_process',
+ 'index_count',
+ 'index_size',
+ 'table_count',
+ 'table_size',
+ 'wal',
+ 'wal_writes',
+ 'archive_wal',
+ 'checkpointer',
+ 'stat_bgwriter_alloc',
+ 'stat_bgwriter_checkpoint',
+ 'stat_bgwriter_backend',
+ 'stat_bgwriter_backend_fsync',
+ 'stat_bgwriter_bgwriter',
+ 'stat_bgwriter_maxwritten',
+ 'replication_slot',
+ 'standby_delta',
+ 'autovacuum'
+]
CHARTS = {
'db_stat_transactions': {
@@ -256,26 +369,30 @@ CHARTS = {
'lines': [
['xact_commit', 'committed', 'incremental'],
['xact_rollback', 'rolled back', 'incremental']
- ]},
+ ]
+ },
'db_stat_connections': {
'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections',
'line'],
'lines': [
['connections', 'connections', 'absolute']
- ]},
+ ]
+ },
'db_stat_blks': {
'options': [None, 'Disk blocks reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_blks', 'line'],
'lines': [
['blks_read', 'disk', 'incremental'],
['blks_hit', 'cache', 'incremental']
- ]},
+ ]
+ },
'db_stat_tuple_returned': {
'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned',
'line'],
'lines': [
['tup_returned', 'sequential', 'incremental'],
['tup_fetched', 'bitmap', 'incremental']
- ]},
+ ]
+ },
'db_stat_tuple_write': {
'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'],
'lines': [
@@ -283,103 +400,128 @@ CHARTS = {
['tup_updated', 'updated', 'incremental'],
['tup_deleted', 'deleted', 'incremental'],
['conflicts', 'conflicts', 'incremental']
- ]},
+ ]
+ },
'db_stat_temp_bytes': {
- 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes', 'line'],
+ 'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
+ 'line'],
'lines': [
['temp_bytes', 'size', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'db_stat_temp_files': {
- 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files', 'line'],
+ 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files',
+ 'line'],
'lines': [
['temp_files', 'files', 'incremental']
- ]},
+ ]
+ },
'database_size': {
'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'],
'lines': [
- ]},
+ ]
+ },
'backend_process': {
'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process',
'line'],
'lines': [
['backends_active', 'active', 'absolute'],
['backends_idle', 'idle', 'absolute']
- ]},
+ ]
+ },
'index_count': {
'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
'lines': [
['index_count', 'total', 'absolute']
- ]},
+ ]
+ },
'index_size': {
'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'],
'lines': [
['index_size', 'size', 'absolute', 1, 1024 * 1024]
- ]},
+ ]
+ },
'table_count': {
'options': [None, 'Total Tables', 'tables', 'tables', 'postgres.table_count', 'line'],
'lines': [
['table_count', 'total', 'absolute']
- ]},
+ ]
+ },
'table_size': {
'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'],
'lines': [
['table_size', 'size', 'absolute', 1, 1024 * 1024]
- ]},
+ ]
+ },
'wal': {
'options': [None, 'Write-Ahead Logs', 'files', 'wal', 'postgres.wal', 'line'],
'lines': [
['written_wal', 'written', 'absolute'],
['recycled_wal', 'recycled', 'absolute'],
['total_wal', 'total', 'absolute']
- ]},
+ ]
+ },
'wal_writes': {
'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'],
'lines': [
['wal_writes', 'writes', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'archive_wal': {
'options': [None, 'Archive Write-Ahead Logs', 'files/s', 'archive wal', 'postgres.archive_wal', 'line'],
'lines': [
['file_count', 'total', 'incremental'],
['ready_count', 'ready', 'incremental'],
['done_count', 'done', 'incremental']
- ]},
+ ]
+ },
'checkpointer': {
'options': [None, 'Checkpoints', 'writes', 'checkpointer', 'postgres.checkpointer', 'line'],
'lines': [
['checkpoint_scheduled', 'scheduled', 'incremental'],
['checkpoint_requested', 'requested', 'incremental']
- ]},
+ ]
+ },
'stat_bgwriter_alloc': {
'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
'lines': [
['buffers_alloc', 'alloc', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'stat_bgwriter_checkpoint': {
- 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_checkpoint', 'line'],
+ 'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter',
+ 'postgres.stat_bgwriter_checkpoint', 'line'],
'lines': [
['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'stat_bgwriter_backend': {
- 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_backend', 'line'],
+ 'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter',
+ 'postgres.stat_bgwriter_backend', 'line'],
'lines': [
['buffers_backend', 'backend', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'stat_bgwriter_backend_fsync': {
'options': [None, 'Fsync by backend', 'times', 'bgwriter', 'postgres.stat_bgwriter_backend_fsync', 'line'],
'lines': [
['buffers_backend_fsync', 'backend fsync', 'incremental']
- ]},
+ ]
+ },
'stat_bgwriter_bgwriter': {
- 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter', 'postgres.bgwriter_bgwriter', 'line'],
+ 'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter',
+ 'postgres.bgwriter_bgwriter', 'line'],
'lines': [
['buffers_clean', 'clean', 'incremental', 1, 1024]
- ]},
+ ]
+ },
'stat_bgwriter_maxwritten': {
- 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten', 'line'],
+ 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten',
+ 'line'],
'lines': [
['maxwritten_clean', 'maxwritten', 'incremental']
- ]},
+ ]
+ },
'autovacuum': {
'options': [None, 'Autovacuum workers', 'workers', 'autovacuum', 'postgres.autovacuum', 'line'],
'lines': [
@@ -388,7 +530,8 @@ CHARTS = {
['vacuum_analyze', 'vacuum analyze', 'absolute'],
['vacuum_freeze', 'vacuum freeze', 'absolute'],
['brin_summarize', 'brin summarize', 'absolute']
- ]},
+ ]
+ },
'standby_delta': {
'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'],
'lines': [
@@ -396,13 +539,15 @@ CHARTS = {
['write_delta', 'write delta', 'absolute', 1, 1024],
['flush_delta', 'flush delta', 'absolute', 1, 1024],
['replay_delta', 'replay delta', 'absolute', 1, 1024]
- ]},
- 'replication_slot': {
+ ]
+ },
+ 'replication_slot': {
'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'],
'lines': [
['replslot_wal_keep', 'wal keeped', 'absolute'],
['replslot_files', 'pg_replslot files', 'absolute']
- ]}
+ ]
+ }
}
@@ -462,7 +607,7 @@ class Service(SimpleService):
cursor.close()
if self.database_poll and isinstance(self.database_poll, str):
- self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()]\
+ self.databases = [dbase for dbase in self.databases if dbase in self.database_poll.split()] \
or self.databases
self.locks_zeroed = populate_lock_types(self.databases)
@@ -482,8 +627,8 @@ class Service(SimpleService):
wal = 'xlog'
lsn = 'location'
self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER']
- self.queries[QUERIES['DIFF_LSN'].format(wal,lsn)] = METRICS['WAL_WRITES']
- self.queries[QUERIES['STANDBY_DELTA'].format(wal,lsn)] = METRICS['STANDBY_DELTA']
+ self.queries[QUERIES['DIFF_LSN'].format(wal, lsn)] = METRICS['WAL_WRITES']
+ self.queries[QUERIES['STANDBY_DELTA'].format(wal, lsn)] = METRICS['STANDBY_DELTA']
if self.index_stats:
self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
@@ -492,7 +637,7 @@ class Service(SimpleService):
if is_superuser:
self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE']
if self.server_version >= 90400:
- self.queries[QUERIES['WAL'].format(wal,lsn)] = METRICS['WAL']
+ self.queries[QUERIES['WAL'].format(wal, lsn)] = METRICS['WAL']
if self.server_version >= 100000:
self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES']
if self.server_version >= 90400:
@@ -501,8 +646,8 @@ class Service(SimpleService):
def create_dynamic_charts_(self):
for database_name in self.databases[::-1]:
- self.definitions['database_size']['lines'].append([database_name + '_size',
- database_name, 'absolute', 1, 1024 * 1024])
+ self.definitions['database_size']['lines'].append(
+ [database_name + '_size', database_name, 'absolute', 1, 1024 * 1024])
for chart_name in [name for name in self.order if name.startswith('db_stat')]:
add_database_stat_chart_(order=self.order, definitions=self.definitions,
name=chart_name, database_name=database_name)
@@ -510,17 +655,21 @@ class Service(SimpleService):
add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name)
for application_name in self.secondaries[::-1]:
- add_replication_delta_chart_(order=self.order, definitions=self.definitions,
- name='standby_delta', application_name=application_name)
+ add_replication_delta_chart_(
+ order=self.order,
+ definitions=self.definitions,
+ name='standby_delta',
+ application_name=application_name)
for slot_name in self.replication_slots[::-1]:
- add_replication_slot_chart_(order=self.order, definitions=self.definitions,
- name='replication_slot', slot_name=slot_name)
-
-
+ add_replication_slot_chart_(
+ order=self.order,
+ definitions=self.definitions,
+ name='replication_slot',
+ slot_name=slot_name)
def _get_data(self):
- result, error = self._connect()
+ result, _ = self._connect()
if result:
cursor = self.connection.cursor(cursor_factory=DictCursor)
try:
@@ -551,7 +700,8 @@ class Service(SimpleService):
else:
dimension_id = metric
if metric in row:
- self.data[dimension_id] = int(row[metric])
+ if row[metric] is not None:
+ self.data[dimension_id] = int(row[metric])
elif 'locks_count' in row:
self.data[dimension_id] = row['locks_count'] if metric == row['mode'] else 0
@@ -564,6 +714,7 @@ def discover_databases_(cursor, query):
result.append(db)
return result
+
def discover_secondaries_(cursor, query):
cursor.execute(query)
result = list()
@@ -572,6 +723,7 @@ def discover_secondaries_(cursor, query):
result.append(sc)
return result
+
def discover_replication_slots_(cursor, query):
cursor.execute(query)
result = list()
@@ -580,14 +732,17 @@ def discover_replication_slots_(cursor, query):
result.append(slot)
return result
+
def check_if_superuser_(cursor, query):
cursor.execute(query)
return cursor.fetchone()[0]
+
def detect_server_version(cursor, query):
cursor.execute(query)
return int(cursor.fetchone()[0])
+
def populate_lock_types(databases):
result = dict()
for database in databases:
@@ -626,11 +781,12 @@ def add_database_stat_chart_(order, definitions, name, database_name):
chart_template = CHARTS[name]
chart_name = '_'.join([database_name, name])
order.insert(0, chart_name)
- name, title, units, family, context, chart_type = chart_template['options']
+ name, title, units, _, context, chart_type = chart_template['options']
definitions[chart_name] = {
'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type],
'lines': create_lines(database_name, chart_template['lines'])}
+
def add_replication_delta_chart_(order, definitions, name, application_name):
def create_lines(standby, lines):
result = list()
@@ -648,6 +804,7 @@ def add_replication_delta_chart_(order, definitions, name, application_name):
'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type],
'lines': create_lines(application_name, chart_template['lines'])}
+
def add_replication_slot_chart_(order, definitions, name, slot_name):
def create_lines(slot, lines):
result = list()
diff --git a/conf.d/python.d/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf
index b69ca3717..b69ca3717 100644
--- a/conf.d/python.d/postgres.conf
+++ b/collectors/python.d.plugin/postgres/postgres.conf
diff --git a/collectors/python.d.plugin/powerdns/Makefile.inc b/collectors/python.d.plugin/powerdns/Makefile.inc
new file mode 100644
index 000000000..256d32a40
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += powerdns/powerdns.chart.py
+dist_pythonconfig_DATA += powerdns/powerdns.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += powerdns/README.md powerdns/Makefile.inc
+
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
new file mode 100644
index 000000000..3c4b145e0
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -0,0 +1,77 @@
+# powerdns
+
+This module monitors PowerDNS performance and health metrics.
+
+PowerDNS charts:
+
+1. **Queries and Answers**
+ * udp-queries
+ * udp-answers
+ * tcp-queries
+ * tcp-answers
+
+2. **Cache Usage**
+ * query-cache-hit
+ * query-cache-miss
+ * packetcache-hit
+ * packetcache-miss
+
+3. **Cache Size**
+ * query-cache-size
+ * packetcache-size
+ * key-cache-size
+ * meta-cache-size
+
+4. **Latency**
+ * latency
+
+PowerDNS Recursor charts:
+
+1. **Questions In**
+ * questions
+ * ipv6-questions
+ * tcp-questions
+
+2. **Questions Out**
+ * all-outqueries
+ * ipv6-outqueries
+ * tcp-outqueries
+ * throttled-outqueries
+
+3. **Answer Times**
+ * answers-slow
+ * answers0-1
+ * answers1-10
+ * answers10-100
+ * answers100-1000
+
+4. **Timeouts**
+ * outgoing-timeouts
+ * outgoing4-timeouts
+ * outgoing6-timeouts
+
+5. **Drops**
+ * over-capacity-drops
+
+6. **Cache Usage**
+ * cache-hits
+ * cache-misses
+ * packetcache-hits
+ * packetcache-misses
+
+7. **Cache Size**
+ * cache-entries
+ * packetcache-entries
+ * negcache-entries
+
+### configuration
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+ header :
+ X-API-Key: 'change_me'
+```
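+
+The same module also works against a PowerDNS Recursor; it switches to the
+recursor charts automatically (see `is_recursor()` in the module). A
+hypothetical job, assuming the recursor's built-in webserver listens on its
+default port 8082:
+
+```yaml
+recursor:
+ name : 'recursor'
+ url : 'http://127.0.0.1:8082/api/v1/servers/localhost/statistics'
+ header :
+ X-API-Key: 'change_me'
+```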
+
+---
diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
new file mode 100644
index 000000000..4264621b2
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+# Description: powerdns netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# Author: Luke Whitworth
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+priority = 60000
+retries = 60
+# update_every = 3
+
+ORDER = ['questions', 'cache_usage', 'cache_size', 'latency']
+CHARTS = {
+ 'questions': {
+ 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
+ 'lines': [
+ ['udp-queries', None, 'incremental'],
+ ['udp-answers', None, 'incremental'],
+ ['tcp-queries', None, 'incremental'],
+ ['tcp-answers', None, 'incremental']
+ ]
+ },
+ 'cache_usage': {
+ 'options': [None, 'PowerDNS Cache Usage', 'count', 'cache', 'powerdns.cache_usage', 'line'],
+ 'lines': [
+ ['query-cache-hit', None, 'incremental'],
+ ['query-cache-miss', None, 'incremental'],
+ ['packetcache-hit', 'packet-cache-hit', 'incremental'],
+ ['packetcache-miss', 'packet-cache-miss', 'incremental']
+ ]
+ },
+ 'cache_size': {
+ 'options': [None, 'PowerDNS Cache Size', 'count', 'cache', 'powerdns.cache_size', 'line'],
+ 'lines': [
+ ['query-cache-size', None, 'absolute'],
+ ['packetcache-size', 'packet-cache-size', 'absolute'],
+ ['key-cache-size', None, 'absolute'],
+ ['meta-cache-size', None, 'absolute']
+ ]
+ },
+ 'latency': {
+ 'options': [None, 'PowerDNS Latency', 'microseconds', 'latency', 'powerdns.latency', 'line'],
+ 'lines': [
+ ['latency', None, 'absolute']
+ ]
+ }
+}
+
+RECURSOR_ORDER = ['questions-in', 'questions-out', 'answer-times', 'timeouts', 'drops', 'cache_usage', 'cache_size']
+
+RECURSOR_CHARTS = {
+ 'questions-in': {
+ 'options': [None, 'PowerDNS Recursor Questions In', 'count', 'questions', 'powerdns_recursor.questions-in',
+ 'line'],
+ 'lines': [
+ ['questions', None, 'incremental'],
+ ['ipv6-questions', None, 'incremental'],
+ ['tcp-questions', None, 'incremental']
+ ]
+ },
+ 'questions-out': {
+ 'options': [None, 'PowerDNS Recursor Questions Out', 'count', 'questions', 'powerdns_recursor.questions-out',
+ 'line'],
+ 'lines': [
+ ['all-outqueries', None, 'incremental'],
+ ['ipv6-outqueries', None, 'incremental'],
+ ['tcp-outqueries', None, 'incremental'],
+ ['throttled-outqueries', None, 'incremental']
+ ]
+ },
+ 'answer-times': {
+ 'options': [None, 'PowerDNS Recursor Answer Times', 'count', 'performance', 'powerdns_recursor.answer-times',
+ 'line'],
+ 'lines': [
+ ['answers-slow', None, 'incremental'],
+ ['answers0-1', None, 'incremental'],
+ ['answers1-10', None, 'incremental'],
+ ['answers10-100', None, 'incremental'],
+ ['answers100-1000', None, 'incremental']
+ ]
+ },
+ 'timeouts': {
+ 'options': [None, 'PowerDNS Recursor Timeouts', 'count', 'performance', 'powerdns_recursor.timeouts',
+ 'line'],
+ 'lines': [
+ ['outgoing-timeouts', None, 'incremental'],
+ ['outgoing4-timeouts', None, 'incremental'],
+ ['outgoing6-timeouts', None, 'incremental']
+ ]
+ },
+ 'drops': {
+ 'options': [None, 'PowerDNS Recursor Drops', 'count', 'performance', 'powerdns_recursor.drops', 'line'],
+ 'lines': [
+ ['over-capacity-drops', None, 'incremental']
+ ]
+ },
+ 'cache_usage': {
+ 'options': [None, 'PowerDNS Recursor Cache Usage', 'count', 'cache', 'powerdns_recursor.cache_usage', 'line'],
+ 'lines': [
+ ['cache-hits', None, 'incremental'],
+ ['cache-misses', None, 'incremental'],
+ ['packetcache-hits', 'packet-cache-hit', 'incremental'],
+ ['packetcache-misses', 'packet-cache-miss', 'incremental']
+ ]
+ },
+ 'cache_size': {
+ 'options': [None, 'PowerDNS Recursor Cache Size', 'count', 'cache', 'powerdns_recursor.cache_size', 'line'],
+ 'lines': [
+ ['cache-entries', None, 'absolute'],
+ ['packetcache-entries', None, 'absolute'],
+ ['negcache-entries', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def check(self):
+ self._manager = self._build_manager()
+ if not self._manager:
+ return None
+
+ d = self._get_data()
+ if not d:
+ return False
+
+ if is_recursor(d):
+ self.order = RECURSOR_ORDER
+ self.definitions = RECURSOR_CHARTS
+ self.module_name = 'powerdns_recursor'
+
+ return True
+
+ def _get_data(self):
+ data = self._get_raw_data()
+ if not data:
+ return None
+ return dict((d['name'], d['value']) for d in loads(data))
+
+
+def is_recursor(d):
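+ # heuristic: a recursor's statistics include 'tcp-questions' and
+ # 'over-capacity-drops'; the authoritative server reports 'tcp-queries'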
+ return 'over-capacity-drops' in d and 'tcp-questions' in d
diff --git a/conf.d/python.d/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf
index ca6200df1..ca6200df1 100644
--- a/conf.d/python.d/powerdns.conf
+++ b/collectors/python.d.plugin/powerdns/powerdns.conf
diff --git a/collectors/python.d.plugin/proxysql/Makefile.inc b/collectors/python.d.plugin/proxysql/Makefile.inc
new file mode 100644
index 000000000..66be372ce
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += proxysql/proxysql.chart.py
+dist_pythonconfig_DATA += proxysql/proxysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += proxysql/README.md proxysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
new file mode 100644
index 000000000..02388276e
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -0,0 +1,62 @@
+# proxysql
+
+This module monitors ProxySQL backend and frontend performance metrics.
+
+It produces:
+
+1. **Connections (frontend)**
+ * connected: number of frontend connections currently connected
+ * aborted: number of frontend connections aborted due to invalid credentials or max_connections reached
+ * non_idle: number of frontend connections that are not currently idle
+ * created: number of frontend connections created
+2. **Questions (frontend)**
+ * questions: total number of queries sent from frontends
+ * slow_queries: number of queries that ran longer than the threshold (in milliseconds) defined in the global variable `mysql-long_query_time`
+3. **Overall Bandwidth (backends)**
+ * in
+ * out
+4. **Status (backends)**
+ * Backends
+ * `1=ONLINE`: backend server is fully operational
+ * `2=SHUNNED`: backend server is temporarily taken out of use, either because of too many connection errors in a short time or because replication lag exceeded the allowed threshold
+ * `3=OFFLINE_SOFT`: when a server is put into OFFLINE_SOFT mode, new incoming connections aren't accepted anymore, while existing connections are kept until they become inactive. In other words, connections are kept in use until the current transaction is completed. This allows a backend to be detached gracefully
+ * `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
+ * `-1`: Unknown status
+5. **Bandwidth (backends)**
+ * Backends
+ * in
+ * out
+6. **Queries (backends)**
+ * Backends
+ * queries
+7. **Latency (backends)**
+ * Backends
+ * ping time
+8. **Pool connections (backends)**
+ * Backends
+ * Used: the number of connections currently used by ProxySQL to send queries to the backend server.
+ * Free: the number of connections currently free.
+ * Established/OK: the number of connections that were established successfully.
+ * Error: the number of connections that failed to be established.
+9. **Commands**
+ * Commands
+ * Count
+ * Duration (Total duration for each command)
+10. **Commands Histogram**
+ * Commands
+ * 100us, 500us, ..., 10s, inf: the total number of commands of the given type that completed within that time bucket (i.e. between the previous limit and this one).
+
+### configuration
+
+```yaml
+tcpipv4:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '127.0.0.1'
+ port : '6032'
+```
+
+If no configuration is given, the module will fail to run.
+
+---
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
new file mode 100644
index 000000000..f7e3d49f9
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -0,0 +1,356 @@
+# -*- coding: utf-8 -*-
+# Description: Proxysql netdata python.d module
+# Author: Ali Borhani (alibo)
+# SPDX-License-Identifier: GPL-3.0+
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 3
+priority = 60000
+retries = 60
+
+
+def query(table, *params):
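+ # e.g. query('stats_mysql_global', 'Variable_Name', 'Variable_Value')
+ # builds 'SELECT Variable_Name, Variable_Value FROM stats_mysql_global'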
+ return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
+
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_global
+QUERY_GLOBAL = query(
+ "stats_mysql_global",
+ "Variable_Name",
+ "Variable_Value"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_connection_pool
+QUERY_CONNECTION_POOL = query(
+ "stats_mysql_connection_pool",
+ "hostgroup",
+ "srv_host",
+ "srv_port",
+ "status",
+ "ConnUsed",
+ "ConnFree",
+ "ConnOK",
+ "ConnERR",
+ "Queries",
+ "Bytes_data_sent",
+ "Bytes_data_recv",
+ "Latency_us"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_commands_counters
+QUERY_COMMANDS = query(
+ "stats_mysql_commands_counters",
+ "Command",
+ "Total_Time_us",
+ "Total_cnt",
+ "cnt_100us",
+ "cnt_500us",
+ "cnt_1ms",
+ "cnt_5ms",
+ "cnt_10ms",
+ "cnt_50ms",
+ "cnt_100ms",
+ "cnt_500ms",
+ "cnt_1s",
+ "cnt_5s",
+ "cnt_10s",
+ "cnt_INFs"
+)
+
+GLOBAL_STATS = [
+ 'client_connections_aborted',
+ 'client_connections_connected',
+ 'client_connections_created',
+ 'client_connections_non_idle',
+ 'proxysql_uptime',
+ 'questions',
+ 'slow_queries'
+]
+
+CONNECTION_POOL_STATS = [
+ 'status',
+ 'connused',
+ 'connfree',
+ 'connok',
+ 'connerr',
+ 'queries',
+ 'bytes_data_sent',
+ 'bytes_data_recv',
+ 'latency_us'
+]
+
+ORDER = [
+ 'connections',
+ 'active_transactions',
+ 'questions',
+ 'pool_overall_net',
+ 'commands_count',
+ 'commands_duration',
+ 'pool_status',
+ 'pool_net',
+ 'pool_queries',
+ 'pool_latency',
+ 'pool_connection_used',
+ 'pool_connection_free',
+ 'pool_connection_ok',
+ 'pool_connection_error'
+]
+
+HISTOGRAM_ORDER = [
+ '100us',
+ '500us',
+ '1ms',
+ '5ms',
+ '10ms',
+ '50ms',
+ '100ms',
+ '500ms',
+ '1s',
+ '5s',
+ '10s',
+ 'inf'
+]
+
+STATUS = {
+ "ONLINE": 1,
+ "SHUNNED": 2,
+ "OFFLINE_SOFT": 3,
+ "OFFLINE_HARD": 4
+}
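+# statuses not listed above are mapped to -1 by Service.convert_status(),
+# matching the '-1: Unknown status' legend in the README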
+
+CHARTS = {
+ 'pool_status': {
+ 'options': [None, 'ProxySQL Backend Status', 'status', 'status', 'proxysql.pool_status', 'line'],
+ 'lines': []
+ },
+ 'pool_net': {
+ 'options': [None, 'ProxySQL Backend Bandwidth', 'kilobits/s', 'bandwidth', 'proxysql.pool_net', 'area'],
+ 'lines': []
+ },
+ 'pool_overall_net': {
+ 'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
+ 'proxysql.pool_overall_net', 'area'],
+ 'lines': [
+ ['bytes_data_recv', 'in', 'incremental', 8, 1024],
+ ['bytes_data_sent', 'out', 'incremental', -8, 1024]
+ ]
+ },
+ 'questions': {
+ 'options': [None, 'ProxySQL Frontend Questions', 'questions/s', 'questions', 'proxysql.questions', 'line'],
+ 'lines': [
+ ['questions', 'questions', 'incremental'],
+ ['slow_queries', 'slow_queries', 'incremental']
+ ]
+ },
+ 'pool_queries': {
+ 'options': [None, 'ProxySQL Backend Queries', 'queries/s', 'queries', 'proxysql.queries', 'line'],
+ 'lines': []
+ },
+ 'active_transactions': {
+ 'options': [None, 'ProxySQL Frontend Active Transactions', 'transactions/s', 'active_transactions',
+ 'proxysql.active_transactions', 'line'],
+ 'lines': [
+ ['active_transactions', 'active_transactions', 'absolute']
+ ]
+ },
+ 'pool_latency': {
+ 'options': [None, 'ProxySQL Backend Latency', 'ms', 'latency', 'proxysql.latency', 'line'],
+ 'lines': []
+ },
+ 'connections': {
+ 'options': [None, 'ProxySQL Frontend Connections', 'connections/s', 'connections', 'proxysql.connections',
+ 'line'],
+ 'lines': [
+ ['client_connections_connected', 'connected', 'absolute'],
+ ['client_connections_created', 'created', 'incremental'],
+ ['client_connections_aborted', 'aborted', 'incremental'],
+ ['client_connections_non_idle', 'non_idle', 'absolute']
+ ]
+ },
+ 'pool_connection_used': {
+ 'options': [None, 'ProxySQL Used Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_used_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_free': {
+ 'options': [None, 'ProxySQL Free Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_free_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_ok': {
+ 'options': [None, 'ProxySQL Established Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_ok_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_error': {
+ 'options': [None, 'ProxySQL Error Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_error_connections', 'line'],
+ 'lines': []
+ },
+ 'commands_count': {
+ 'options': [None, 'ProxySQL Commands', 'commands', 'commands', 'proxysql.commands_count', 'line'],
+ 'lines': []
+ },
+ 'commands_duration': {
+ 'options': [None, 'ProxySQL Commands Duration', 'ms', 'commands', 'proxysql.commands_duration', 'line'],
+ 'lines': []
+ }
+}
+
+
+class Service(MySQLService):
+ def __init__(self, configuration=None, name=None):
+ MySQLService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.queries = dict(
+ global_status=QUERY_GLOBAL,
+ connection_pool_status=QUERY_CONNECTION_POOL,
+ commands_status=QUERY_COMMANDS
+ )
+
+ def _get_data(self):
+ raw_data = self._get_raw_data(description=True)
+
+ if not raw_data:
+ return None
+
+ to_netdata = dict()
+
+ if 'global_status' in raw_data:
+ global_status = dict(raw_data['global_status'][0])
+ for key in global_status:
+ if key.lower() in GLOBAL_STATS:
+ to_netdata[key.lower()] = global_status[key]
+
+ if 'connection_pool_status' in raw_data:
+
+ to_netdata['bytes_data_recv'] = 0
+ to_netdata['bytes_data_sent'] = 0
+
+ for record in raw_data['connection_pool_status'][0]:
+ backend = self.generate_backend(record)
+ name = self.generate_backend_name(backend)
+
+ for key in backend:
+ if key in CONNECTION_POOL_STATS:
+ if key == 'status':
+ backend[key] = self.convert_status(backend[key])
+
+ if len(self.charts) > 0:
+ if (name + '_status') not in self.charts['pool_status']:
+ self.add_backend_dimensions(name)
+
+ to_netdata["{0}_{1}".format(name, key)] = backend[key]
+
+ if key == 'bytes_data_recv':
+ to_netdata['bytes_data_recv'] += int(backend[key])
+
+ if key == 'bytes_data_sent':
+ to_netdata['bytes_data_sent'] += int(backend[key])
+
+ if 'commands_status' in raw_data:
+ for record in raw_data['commands_status'][0]:
+ cmd = self.generate_command_stats(record)
+ name = cmd['name']
+
+ if len(self.charts) > 0:
+ if (name + '_count') not in self.charts['commands_count']:
+ self.add_command_dimensions(name)
+ self.add_histogram_chart(cmd)
+
+ to_netdata[name + '_count'] = cmd['count']
+ to_netdata[name + '_duration'] = cmd['duration']
+ for histogram in cmd['histogram']:
+ dimId = 'commands_histogram_{0}_{1}'.format(name, histogram)
+ to_netdata[dimId] = cmd['histogram'][histogram]
+
+ return to_netdata or None
+
+ def add_backend_dimensions(self, name):
+ self.charts['pool_status'].add_dimension([name + '_status', name, 'absolute'])
+ self.charts['pool_net'].add_dimension([name + '_bytes_data_recv', 'from_' + name, 'incremental', 8, 1024])
+ self.charts['pool_net'].add_dimension([name + '_bytes_data_sent', 'to_' + name, 'incremental', -8, 1024])
+ self.charts['pool_queries'].add_dimension([name + '_queries', name, 'incremental'])
+ self.charts['pool_latency'].add_dimension([name + '_latency_us', name, 'absolute', 1, 1000])
+ self.charts['pool_connection_used'].add_dimension([name + '_connused', name, 'absolute'])
+ self.charts['pool_connection_free'].add_dimension([name + '_connfree', name, 'absolute'])
+ self.charts['pool_connection_ok'].add_dimension([name + '_connok', name, 'incremental'])
+ self.charts['pool_connection_error'].add_dimension([name + '_connerr', name, 'incremental'])
+
+ def add_command_dimensions(self, cmd):
+ self.charts['commands_count'].add_dimension([cmd + '_count', cmd, 'incremental'])
+ self.charts['commands_duration'].add_dimension([cmd + '_duration', cmd, 'incremental', 1, 1000])
+
+ def add_histogram_chart(self, cmd):
+ chart = self.charts.add_chart(self.histogram_chart(cmd))
+
+ for histogram in HISTOGRAM_ORDER:
+ dimId = 'commands_histogram_{0}_{1}'.format(cmd['name'], histogram)
+ chart.add_dimension([dimId, histogram, 'incremental'])
+
+ @staticmethod
+ def histogram_chart(cmd):
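+ # positional chart definition:
+ # [id, name, title, units, family, context, chart_type]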
+ return [
+ 'commands_histogram_' + cmd['name'],
+ None,
+ 'ProxySQL {0} Command Histogram'.format(cmd['name'].title()),
+ 'commands',
+ 'commands_histogram',
+ 'proxysql.commands_histogram_' + cmd['name'],
+ 'stacked'
+ ]
+
+ @staticmethod
+ def generate_backend(data):
+ return {
+ 'hostgroup': data[0],
+ 'srv_host': data[1],
+ 'srv_port': data[2],
+ 'status': data[3],
+ 'connused': data[4],
+ 'connfree': data[5],
+ 'connok': data[6],
+ 'connerr': data[7],
+ 'queries': data[8],
+ 'bytes_data_sent': data[9],
+ 'bytes_data_recv': data[10],
+ 'latency_us': data[11]
+ }
+
+ @staticmethod
+ def generate_command_stats(data):
+ return {
+ 'name': data[0].lower(),
+ 'duration': data[1],
+ 'count': data[2],
+ 'histogram': {
+ '100us': data[3],
+ '500us': data[4],
+ '1ms': data[5],
+ '5ms': data[6],
+ '10ms': data[7],
+ '50ms': data[8],
+ '100ms': data[9],
+ '500ms': data[10],
+ '1s': data[11],
+ '5s': data[12],
+ '10s': data[13],
+ 'inf': data[14]
+ }
+ }
+
+ @staticmethod
+ def generate_backend_name(backend):
+ hostgroup = backend['hostgroup'].replace(' ', '_').lower()
+ host = backend['srv_host'].replace('.', '_')
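+ # e.g. hostgroup '0', host '10.0.0.1', port '3306' -> '0_10_0_0_1_3306'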
+
+ return "{0}_{1}_{2}".format(hostgroup, host, backend['srv_port'])
+
+ @staticmethod
+ def convert_status(status):
+ if status in STATUS:
+ return STATUS[status]
+ return -1
diff --git a/collectors/python.d.plugin/proxysql/proxysql.conf b/collectors/python.d.plugin/proxysql/proxysql.conf
new file mode 100644
index 000000000..d29c2e5be
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.conf
@@ -0,0 +1,118 @@
+# netdata python.d.plugin configuration for ProxySQL
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, proxysql also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the proxysql username to use
+# pass: 'password' # the proxysql password to use
+#
+
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : 'localhost'
+ port : '6032'
+
+tcpipv4:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '127.0.0.1'
+ port : '6032'
+
+tcpipv6:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '::1'
+ port : '6032'
+
+tcp_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : 'localhost'
+ port : '6032'
+
+tcpipv4_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : '127.0.0.1'
+ port : '6032'
+
+tcpipv6_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : '::1'
+ port : '6032'
diff --git a/collectors/python.d.plugin/puppet/Makefile.inc b/collectors/python.d.plugin/puppet/Makefile.inc
new file mode 100644
index 000000000..fe94b9254
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += puppet/puppet.chart.py
+dist_pythonconfig_DATA += puppet/puppet.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += puppet/README.md puppet/Makefile.inc
+
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
new file mode 100644
index 000000000..8304c831e
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -0,0 +1,48 @@
+# puppet
+
+This module monitors the status of Puppet Server and PuppetDB.
+
+The following charts are drawn:
+
+1. **JVM Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+2. **JVM Non-Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+3. **CPU Usage**
+ * execution
+ * GC (taken by garbage collection)
+4. **File Descriptors**
+ * max
+ * used
+
+
+### configuration
+
+```yaml
+puppetdb:
+ url: 'https://fqdn.example.com:8081'
+ tls_cert_file: /path/to/client.crt
+ tls_key_file: /path/to/client.key
+ autodetection_retry: 1
+ retries: 3600
+
+puppetserver:
+ url: 'https://fqdn.example.com:8140'
+ autodetection_retry: 1
+ retries: 3600
+```
+
+When no configuration is given, the module tries `https://<node FQDN>:8140`
+(using the local fully qualified domain name) without any retries.
+
+### notes
+
+* The exact fully qualified domain name of the node should be used.
+* Puppet Server/DB startup time is usually very long, so the retry
+ count should be set reasonably high.
+* A hardened PuppetDB configuration may require a client certificate;
+ this does not apply to the default PuppetDB configuration.
+
+---
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
new file mode 100644
index 000000000..5c8e48bd9
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Description: puppet netdata python.d module
+# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This module should work both with OpenSource and PE versions
+# of PuppetServer and PuppetDB.
+#
+# NOTE: PuppetDB may be configured to require proper TLS
+# client certificate for security reasons. Use tls_key_file
+# and tls_cert_file options then.
+#
+
+from bases.FrameworkServices.UrlService import UrlService
+from json import loads
+import socket
+
+update_every = 5
+priority = 60000
+# very long clojure-based service startup time
+retries = 180
+
+MB = 1048576
+CPU_SCALE = 1000
+ORDER = [
+ 'jvm_heap',
+ 'jvm_nonheap',
+ 'cpu',
+ 'fd_open',
+]
+CHARTS = {
+ 'jvm_heap': {
+ 'options': [None, 'JVM Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_heap_committed', 'committed', 'absolute', 1, MB],
+ ['jvm_heap_used', 'used', 'absolute', 1, MB],
+ ],
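+ # 'variables' ships chart-level values that are not drawn as dimensions
+ # (presumably for use in netdata health templates)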
+ 'variables': [
+ ['jvm_heap_max'],
+ ['jvm_heap_init'],
+ ],
+ },
+ 'jvm_nonheap': {
+ 'options': [None, 'JVM Non-Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_nonheap_committed', 'committed', 'absolute', 1, MB],
+ ['jvm_nonheap_used', 'used', 'absolute', 1, MB],
+ ],
+ 'variables': [
+ ['jvm_nonheap_max'],
+ ['jvm_nonheap_init'],
+ ],
+ },
+ 'cpu': {
+ 'options': [None, 'CPU usage', 'percentage', 'resources', 'puppet.cpu', 'stacked'],
+ 'lines': [
+ ['cpu_time', 'execution', 'absolute', 1, CPU_SCALE],
+ ['gc_time', 'GC', 'absolute', 1, CPU_SCALE],
+ ]
+ },
+ 'fd_open': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'resources', 'puppet.fdopen', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute'],
+ ],
+ 'variables': [
+ ['fd_max'],
+ ],
+ },
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = 'https://{0}:8140'.format(socket.getfqdn())
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ # NOTE: there are several ways to retrieve data
+ # 1. Only PE versions:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
+ # 2. Individual Metrics API (JMX):
+ # https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
+ # 3. Extended status at debug level:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
+ #
+ # For the sake of simplicity and efficiency, the status endpoint is used.
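+ #
+ # The debug-level payload is expected to look roughly like this
+ # (abridged; shape inferred from the parsing below):
+ # {'status-service': {'status': {'experimental': {'jvm-metrics': {
+ # 'heap-memory': {...}, 'non-heap-memory': {...},
+ # 'file-descriptors': {...}, 'cpu-usage': ..., 'gc-cpu-usage': ...}}}}}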
+
+ raw_data = self._get_raw_data(self.url + '/status/v1/services?level=debug')
+
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+ data = {}
+
+ try:
+ try:
+ jvm_metrics = raw_data['status-service']['status']['experimental']['jvm-metrics']
+ except KeyError:
+ jvm_metrics = raw_data['status-service']['status']['jvm-metrics']
+
+ heap_mem = jvm_metrics['heap-memory']
+ non_heap_mem = jvm_metrics['non-heap-memory']
+
+ for k in ['max', 'committed', 'used', 'init']:
+ data['jvm_heap_'+k] = heap_mem[k]
+ data['jvm_nonheap_'+k] = non_heap_mem[k]
+
+ fd_open = jvm_metrics['file-descriptors']
+ data['fd_max'] = fd_open['max']
+ data['fd_used'] = fd_open['used']
+
+ data['cpu_time'] = int(jvm_metrics['cpu-usage'] * CPU_SCALE)
+ data['gc_time'] = int(jvm_metrics['gc-cpu-usage'] * CPU_SCALE)
+ except KeyError:
+ pass
+
+ return data or None
diff --git a/collectors/python.d.plugin/puppet/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf
new file mode 100644
index 000000000..991bfabed
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.conf
@@ -0,0 +1,98 @@
+# netdata python.d.plugin configuration for Puppet Server and Puppet DB
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# This configuration comes from the UrlService base:
+# url: # HTTP or HTTPS URL
+# tls_verify: False # Control HTTPS server certificate verification
+# tls_ca_file: # Optional CA (bundle) file to use
+# tls_cert_file: # Optional client certificate file
+# tls_key_file: # Optional client key file
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+# puppet:
+# url: 'https://<FQDN>:8140'
+#
+
+#
+# A production configuration should look like the example below.
+#
+# NOTE: Puppet Server/DB startup time is usually very long, so the retry
+# count should be set reasonably high.
+#
+# NOTE: a hardened PuppetDB configuration may require a client certificate.
+# This does not apply to the default PuppetDB configuration.
+#
+# puppetdb:
+# url: 'https://fqdn.example.com:8081'
+# tls_cert_file: /path/to/client.crt
+# tls_key_file: /path/to/client.key
+# autodetection_retry: 1
+# retries: 3600
+#
+# puppetserver:
+# url: 'https://fqdn.example.com:8140'
+# autodetection_retry: 1
+# retries: 3600
+#
diff --git a/conf.d/python.d.conf b/collectors/python.d.plugin/python.d.conf
index bb57738bb..97f4cb8d5 100644
--- a/conf.d/python.d.conf
+++ b/collectors/python.d.plugin/python.d.conf
@@ -19,11 +19,19 @@ enabled: yes
# If "default_run" = "no" the default for all modules is disabled (no).
# Setting any of these to "yes" will enable it.
+# Enable / Disable explicit garbage collection (full collection run). Default is enabled.
+gc_run: yes
+
+# Garbage collection interval in seconds. Default is 300.
+gc_interval: 300
+
+# apache: yes
+
# apache_cache has been replaced by web_log
apache_cache: no
-# apache: yes
# beanstalk: yes
# bind_rndc: yes
+# boinc: yes
# ceph: yes
chrony: no
# couchdb: yes
@@ -40,40 +48,50 @@ example: no
# exim: yes
# fail2ban: yes
# freeradius: yes
+go_expvar: no
# gunicorn_log has been replaced by web_log
gunicorn_log: no
-go_expvar: no
# haproxy: yes
# hddtemp: yes
# icecast: yes
# ipfs: yes
# isc_dhcpd: yes
+# linux_power_supply: yes
+# litespeed: yes
+logind: no
# mdstat: yes
# memcached: yes
# mongodb: yes
+# monit: yes
# mysql: yes
# nginx: yes
# nginx_plus: yes
-# nsd: yes
-# ntpd: yes
# nginx_log has been replaced by web_log
nginx_log: no
+# nsd: yes
# ntpd: yes
# ovpn_status_log: yes
# phpfpm: yes
# postfix: yes
# postgres: yes
# powerdns: yes
+# proxysql: yes
+# puppet: yes
# rabbitmq: yes
# redis: yes
+# rethinkdbs: yes
# retroshare: yes
-# sensors: yes
# samba: yes
+# sensors: yes
# smartd_log: yes
-# squid: yes
+# spigotmc: yes
# springboot: yes
+# squid: yes
# tomcat: yes
+unbound: no
+# uwsgi: yes
# varnish: yes
+# w1sensor: yes
# web_log: yes
diff --git a/plugins.d/python.d.plugin b/collectors/python.d.plugin/python.d.plugin
index c9b260164..264c3383d 100755..100644
--- a/plugins.d/python.d.plugin
+++ b/collectors/python.d.plugin/python.d.plugin
@@ -6,7 +6,9 @@ echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
# Description:
# Author: Pawel Krupa (paulfantom)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+import gc
import os
import sys
import threading
@@ -15,22 +17,30 @@ from re import sub
from sys import version_info, argv
from time import sleep
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
+GC_RUN = True
+GC_COLLECT_EVERY = 300
PY_VERSION = version_info[:2]
-PLUGIN_CONFIG_DIR = os.getenv('NETDATA_CONFIG_DIR', os.path.dirname(__file__) + '/../../../../etc/netdata') + '/'
-CHARTS_PY_DIR = os.path.abspath(os.getenv('NETDATA_PLUGINS_DIR', os.path.dirname(__file__)) + '/../python.d') + '/'
-CHARTS_PY_CONFIG_DIR = PLUGIN_CONFIG_DIR + 'python.d/'
-PYTHON_MODULES_DIR = CHARTS_PY_DIR + 'python_modules'
+
+USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '/usr/local/etc/netdata')
+STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '/usr/local/lib/netdata/conf.d')
+
+PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d')
+PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d')
+
+
+PLUGINS_DIR = os.path.abspath(os.getenv(
+ 'NETDATA_PLUGINS_DIR',
+ os.path.dirname(__file__)) + '/../python.d')
+
+
+PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules')
sys.path.append(PYTHON_MODULES_DIR)
-from bases.loaders import ModuleAndConfigLoader
-from bases.loggers import PythonDLogger
-from bases.collection import setdefault_values, run_and_exit
+from bases.loaders import ModuleAndConfigLoader # noqa: E402
+from bases.loggers import PythonDLogger # noqa: E402
+from bases.collection import setdefault_values, run_and_exit # noqa: E402
try:
from collections import OrderedDict
@@ -53,7 +63,7 @@ def module_ok(m):
return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES
-ALL_MODULES = [m for m in sorted(os.listdir(CHARTS_PY_DIR)) if module_ok(m)]
+ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)]
def parse_cmd():
@@ -68,6 +78,13 @@ def multi_job_check(config):
return next((True for key in config if isinstance(config[key], dict)), False)
+class RawModule:
+ def __init__(self, name, path, explicitly_enabled=True):
+ self.name = name
+ self.path = path
+ self.explicitly_enabled = explicitly_enabled
+
+
class Job(object):
def __init__(self, initialized_job, job_id):
"""
@@ -80,7 +97,7 @@ class Job(object):
self.recheck_every = self.job.configuration.pop('autodetection_retry')
self.checked = False # used in Plugin.check_job()
self.created = False # used in Plugin.create_job_charts()
- if OVERRIDE_UPDATE_EVERY:
+ if self.job.update_every < int(OVERRIDE_UPDATE_EVERY):
self.job.update_every = int(OVERRIDE_UPDATE_EVERY)
def __getattr__(self, item):
@@ -194,9 +211,22 @@ class Plugin(object):
self.modules = OrderedDict()
self.sleep_time = 1
self.runs_counter = 0
- self.config, error = self.loader.load_config_from_file(PLUGIN_CONFIG_DIR + 'python.d.conf')
+
+ user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf')
+ stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ self.config, error = self.loader.load_config_from_file(user_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ self.config, error = self.loader.load_config_from_file(stock_config)
if error:
- Logger.error('"python.d.conf" configuration file not found. Using defaults.')
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ self.do_gc = self.config.get("gc_run", GC_RUN)
+ self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY)
if not self.config.get('enabled', True):
run_and_exit(Logger.info)('DISABLED in configuration file.')
@@ -223,47 +253,57 @@ class Plugin(object):
def enabled_modules(self):
for mod in MODULES_TO_RUN:
mod_name = mod[:-len(MODULE_EXTENSION)]
- mod_path = CHARTS_PY_DIR + mod
- conf_path = ''.join([CHARTS_PY_CONFIG_DIR, mod_name, '.conf'])
-
- if DEBUG:
- yield mod, mod_name, mod_path, conf_path
- else:
- if all([self.config.get('default_run', True),
- self.config.get(mod_name, True)]):
- yield mod, mod_name, mod_path, conf_path
-
- elif all([not self.config.get('default_run'),
- self.config.get(mod_name)]):
- yield mod, mod_name, mod_path, conf_path
+ mod_path = os.path.join(PLUGINS_DIR, mod)
+ if any(
+ [
+ self.config.get('default_run', True) and self.config.get(mod_name, True),
+ (not self.config.get('default_run')) and self.config.get(mod_name),
+ ]
+ ):
+ yield RawModule(
+ name=mod_name,
+ path=mod_path,
+ explicitly_enabled=self.config.get(mod_name),
+ )
def load_and_initialize_modules(self):
- for mod, mod_name, mod_path, conf_path in self.enabled_modules():
+ for mod in self.enabled_modules():
# Load module from file ------------------------------------------------------------
- loaded_module, error = self.loader.load_module_from_file(mod_name, mod_path)
+ loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path)
log = Logger.error if error else Logger.debug
log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
- module_name=mod_name))
+ module_name=mod.name))
if error:
Logger.error("load source error : {0}".format(error))
continue
# Load module config from file ------------------------------------------------------
- loaded_config, error = self.loader.load_config_from_file(conf_path)
- log = Logger.error if error else Logger.debug
- log("module load config: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
- module_name=mod_name))
+ user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf')
+ stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ loaded_config, error = self.loader.load_config_from_file(user_config)
if error:
- Logger.error('load config error : {0}'.format(error))
+ Logger.error("cannot load '{0}' : {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ loaded_config, error = self.loader.load_config_from_file(stock_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ # Skip disabled modules
+ if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled:
+ Logger.info("module '{0}' disabled by default".format(loaded_module.__name__))
+ continue
+
+ # Module initialization ---------------------------------------------------
- # Service instance initialization ---------------------------------------------------
initialized_module = Module(service=loaded_module, config=loaded_config)
Logger.debug("module status: '{module_name}' => [{status}] "
"(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED',
module_name=initialized_module.name,
jobs_number=len(initialized_module)))
-
if initialized_module:
self.modules[initialized_module.name] = initialized_module
@@ -349,6 +389,11 @@ class Plugin(object):
self.cleanup()
self.autodetect_retry()
+ # FIXME: https://github.com/netdata/netdata/issues/3817
+ if self.do_gc and self.runs_counter % self.gc_interval == 0:
+ v = gc.collect()
+ Logger.debug("GC full collection run result: {0}".format(v))
+
def cleanup(self):
for job in self.dead_jobs:
self.delete_job(job)
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
new file mode 100755
index 000000000..7ac03fd99
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -0,0 +1,427 @@
+#!/usr/bin/env bash
+'''':; exec "$(command -v python || command -v python3 || command -v python2 ||
+echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
+
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import gc
+import os
+import sys
+import threading
+
+from re import sub
+from sys import version_info, argv
+from time import sleep
+
+GC_RUN = True
+GC_COLLECT_EVERY = 300
+
+PY_VERSION = version_info[:2]
+
+USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '@configdir_POST@')
+STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '@libconfigdir_POST@')
+
+PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d')
+PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d')
+
+
+PLUGINS_DIR = os.path.abspath(os.getenv(
+ 'NETDATA_PLUGINS_DIR',
+ os.path.dirname(__file__)) + '/../python.d')
+
+
+PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules')
+
+sys.path.append(PYTHON_MODULES_DIR)
+
+from bases.loaders import ModuleAndConfigLoader # noqa: E402
+from bases.loggers import PythonDLogger # noqa: E402
+from bases.collection import setdefault_values, run_and_exit # noqa: E402
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
+ 'retries': 60,
+ 'priority': 60000,
+ 'autodetection_retry': 0,
+ 'chart_cleanup': 10,
+ 'name': str()}
+
+
+MODULE_EXTENSION = '.chart.py'
+OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log']
+
+
+def module_ok(m):
+ return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES
+
+
+ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)]
+
+
+def parse_cmd():
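+ # e.g. `python.d.plugin debug 3 apache` enables debug mode, overrides
+ # update_every to 3 and restricts the run to the apache module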
+ debug = 'debug' in argv[1:]
+ trace = 'trace' in argv[1:]
+ override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False)
+ modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES]
+ return debug, trace, override_update_every, modules or ALL_MODULES
+
+
+def multi_job_check(config):
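+ # e.g. {'name': 'x'} -> False (single job), {'job1': {...}} -> True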
+ return next((True for key in config if isinstance(config[key], dict)), False)
+
+
+class RawModule:
+ def __init__(self, name, path, explicitly_enabled=True):
+ self.name = name
+ self.path = path
+ self.explicitly_enabled = explicitly_enabled
+
+
+class Job(object):
+ def __init__(self, initialized_job, job_id):
+ """
+ :param initialized_job: instance of <Class Service>
+ :param job_id: <str>
+ """
+ self.job = initialized_job
+ self.id = job_id # key in Modules.jobs()
+ self.module_name = self.job.__module__ # used in Plugin.delete_job()
+ self.recheck_every = self.job.configuration.pop('autodetection_retry')
+ self.checked = False # used in Plugin.check_job()
+ self.created = False # used in Plugin.create_job_charts()
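+ # the command-line override only raises update_every; jobs already
+ # configured with a higher (slower) value keep their own setting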
+ if self.job.update_every < int(OVERRIDE_UPDATE_EVERY):
+ self.job.update_every = int(OVERRIDE_UPDATE_EVERY)
+
+ def __getattr__(self, item):
+ return getattr(self.job, item)
+
+ def __repr__(self):
+ return self.job.__repr__()
+
+ def is_dead(self):
+ return bool(self.ident) and not self.is_alive()
+
+ def not_launched(self):
+ return not bool(self.ident)
+
+ def is_autodetect(self):
+ return self.recheck_every
+
+
+class Module(object):
+ def __init__(self, service, config):
+ """
+ :param service: <Module>
+ :param config: <dict>
+ """
+ self.service = service
+ self.name = service.__name__
+ self.config = self.jobs_configurations_builder(config)
+ self.jobs = OrderedDict()
+ self.counter = 1
+
+ self.initialize_jobs()
+
+ def __repr__(self):
+ return "<Class Module '{name}'>".format(name=self.name)
+
+ def __iter__(self):
+ return iter(OrderedDict(self.jobs).values())
+
+ def __getitem__(self, item):
+ return self.jobs[item]
+
+ def __delitem__(self, key):
+ del self.jobs[key]
+
+ def __len__(self):
+ return len(self.jobs)
+
+ def __bool__(self):
+ return bool(self.jobs)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def jobs_configurations_builder(self, config):
+ """
+ :param config: <dict>
+ :return:
+ """
+ counter = 0
+ job_base_config = dict()
+
+ for attr in BASE_CONFIG:
+ job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr]))
+
+ if not config:
+ config = {str(): dict()}
+ elif not multi_job_check(config):
+ config = {str(): config}
+
+ for job_name in config:
+ if not isinstance(config[job_name], dict):
+ continue
+
+ job_config = setdefault_values(config[job_name], base_dict=job_base_config)
+ job_name = sub(r'\s+', '_', job_name)
+ config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name'])
+ counter += 1
+ job_id = 'job' + str(counter).zfill(3)
+
+ yield job_id, job_name, job_config
+
+ def initialize_jobs(self):
+ """
+ :return:
+ """
+ for job_id, job_name, job_config in self.config:
+ job_config['job_name'] = job_name
+ job_config['override_name'] = job_config.pop('name')
+
+ try:
+ initialized_job = self.service.Service(configuration=job_config)
+ except Exception as error:
+ Logger.error("job initialization: '{module_name} {job_name}' "
+ "=> ['FAILED'] ({error})".format(module_name=self.name,
+ job_name=job_name,
+ error=error))
+ continue
+ else:
+ Logger.debug("job initialization: '{module_name} {job_name}' "
+ "=> ['OK']".format(module_name=self.name,
+ job_name=job_name or self.name))
+ self.jobs[job_id] = Job(initialized_job=initialized_job,
+ job_id=job_id)
+ del self.config
+ del self.service
+
+
+class Plugin(object):
+ def __init__(self):
+ self.loader = ModuleAndConfigLoader()
+ self.modules = OrderedDict()
+ self.sleep_time = 1
+ self.runs_counter = 0
+
+ user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf')
+ stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ self.config, error = self.loader.load_config_from_file(user_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ self.config, error = self.loader.load_config_from_file(stock_config)
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ self.do_gc = self.config.get("gc_run", GC_RUN)
+ self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY)
+
+ if not self.config.get('enabled', True):
+ run_and_exit(Logger.info)('DISABLED in configuration file.')
+
+ self.load_and_initialize_modules()
+ if not self.modules:
+ run_and_exit(Logger.info)('No modules to run. Exit...')
+
+ def __iter__(self):
+ return iter(OrderedDict(self.modules).values())
+
+ @property
+ def jobs(self):
+ return (job for mod in self for job in mod)
+
+ @property
+ def dead_jobs(self):
+ return (job for job in self.jobs if job.is_dead())
+
+ @property
+ def autodetect_jobs(self):
+ return [job for job in self.jobs if job.not_launched()]
+
+ def enabled_modules(self):
+ for mod in MODULES_TO_RUN:
+ mod_name = mod[:-len(MODULE_EXTENSION)]
+ mod_path = os.path.join(PLUGINS_DIR, mod)
+ if any(
+ [
+ self.config.get('default_run', True) and self.config.get(mod_name, True),
+ (not self.config.get('default_run')) and self.config.get(mod_name),
+ ]
+ ):
+ yield RawModule(
+ name=mod_name,
+ path=mod_path,
+ explicitly_enabled=self.config.get(mod_name),
+ )
+
+ def load_and_initialize_modules(self):
+ for mod in self.enabled_modules():
+
+ # Load module from file ------------------------------------------------------------
+ loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path)
+ log = Logger.error if error else Logger.debug
+ log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
+ module_name=mod.name))
+ if error:
+ Logger.error("load source error : {0}".format(error))
+ continue
+
+ # Load module config from file ------------------------------------------------------
+ user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf')
+ stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ loaded_config, error = self.loader.load_config_from_file(user_config)
+ if error:
+ Logger.error("cannot load '{0}' : {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ loaded_config, error = self.loader.load_config_from_file(stock_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ # Skip disabled modules
+ if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled:
+ Logger.info("module '{0}' disabled by default".format(loaded_module.__name__))
+ continue
+
+ # Module initialization ---------------------------------------------------
+
+ initialized_module = Module(service=loaded_module, config=loaded_config)
+ Logger.debug("module status: '{module_name}' => [{status}] "
+ "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED',
+ module_name=initialized_module.name,
+ jobs_number=len(initialized_module)))
+ if initialized_module:
+ self.modules[initialized_module.name] = initialized_module
+
+ @staticmethod
+ def check_job(job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ try:
+ check_ok = bool(job.check())
+ except Exception as error:
+ job.error('check() unhandled exception: {error}'.format(error=error))
+ return None
+ else:
+ return check_ok
+
+ @staticmethod
+ def create_job_charts(job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ try:
+ create_ok = job.create()
+ except Exception as error:
+ job.error('create() unhandled exception: {error}'.format(error=error))
+ return False
+ else:
+ return create_ok
+
+ def delete_job(self, job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ del self.modules[job.module_name][job.id]
+
+ def run_check(self):
+ checked = list()
+ for job in self.jobs:
+ if job.name in checked:
+ job.info('check() => [DROPPED] (already served by another job)')
+ self.delete_job(job)
+ continue
+ ok = self.check_job(job)
+ if ok:
+ job.info('check() => [OK]')
+ checked.append(job.name)
+ job.checked = True
+ continue
+ if not job.is_autodetect() or ok is None:
+ job.info('check() => [FAILED]')
+ self.delete_job(job)
+ else:
+ job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every))
+
+ def run_create(self):
+ for job in self.jobs:
+ if not job.checked:
+ # skip autodetection_retry jobs
+ continue
+ ok = self.create_job_charts(job)
+ if ok:
+ job.debug('create() => [OK] (charts: {0})'.format(len(job.charts)))
+ job.created = True
+ continue
+ job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts)))
+ self.delete_job(job)
+
+ def start(self):
+ self.run_check()
+ self.run_create()
+ for job in self.jobs:
+ if job.created:
+ job.start()
+
+ while True:
+ if threading.active_count() <= 1 and not self.autodetect_jobs:
+ run_and_exit(Logger.info)('FINISHED')
+
+ sleep(self.sleep_time)
+ self.cleanup()
+ self.autodetect_retry()
+
+ # FIXME: https://github.com/netdata/netdata/issues/3817
+ if self.do_gc and self.runs_counter % self.gc_interval == 0:
+ v = gc.collect()
+ Logger.debug("GC full collection run result: {0}".format(v))
+
+ def cleanup(self):
+ for job in self.dead_jobs:
+ self.delete_job(job)
+ for mod in self:
+ if not mod:
+ del self.modules[mod.name]
+
+ def autodetect_retry(self):
+ self.runs_counter += self.sleep_time
+ for job in self.autodetect_jobs:
+ if self.runs_counter % job.recheck_every == 0:
+ checked = self.check_job(job)
+ if checked:
+ created = self.create_job_charts(job)
+ if not created:
+ self.delete_job(job)
+ continue
+ job.start()
+
+
+if __name__ == '__main__':
+ DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd()
+ Logger = PythonDLogger()
+ if DEBUG:
+ Logger.logger.severity = 'DEBUG'
+ if TRACE:
+ Logger.log_traceback = True
+ Logger.info('Using python {version}'.format(version=PY_VERSION[0]))
+
+ plugin = Plugin()
+ plugin.start()
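
The first two lines of the new file above form a bash/Python polyglot: bash parses the quoted empty strings as a no-op and runs the exec, re-launching the script under whichever interpreter command -v finds first, while Python reads the same span as a module-level string literal. A minimal standalone sketch of the same trick (the printed message is illustrative):

    #!/usr/bin/env bash
    '''':; exec "$(command -v python || command -v python3)" "$0" "$@" # '''
    # Everything below runs as ordinary Python; bash never reaches it.
    import sys
    print('interpreter: {0}'.format(sys.executable))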
diff --git a/python.d/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/third_party/__init__.py
+++ b/collectors/python.d.plugin/python_modules/__init__.py
diff --git a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
index a71f2bfd2..72f9ff714 100644
--- a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
@@ -2,6 +2,7 @@
# Description:
# Author: Pawel Krupa (paulfantom)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
@@ -16,15 +17,15 @@ class ExecutableService(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name)
self.command = None
- def _get_raw_data(self, stderr=False):
+ def _get_raw_data(self, stderr=False, command=None):
"""
Get raw data from executed command
:return: <list>
"""
try:
- p = Popen(self.command, stdout=PIPE, stderr=PIPE)
+ p = Popen(command if command else self.command, stdout=PIPE, stderr=PIPE)
except Exception as error:
- self.error('Executing command {command} resulted in error: {error}'.format(command=self.command,
+ self.error('Executing command {command} resulted in error: {error}'.format(command=command or self.command,
error=error))
return None
data = list()
@@ -35,7 +36,7 @@ class ExecutableService(SimpleService):
except TypeError:
continue
- return data or None
+ return data
def check(self):
"""
diff --git a/python.d/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
index 45daa2446..5acfd73f8 100644
--- a/python.d/python_modules/bases/FrameworkServices/LogService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from glob import glob
import os
diff --git a/python.d/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
index 3acc5b109..53807e2c4 100644
--- a/python.d/python_modules/bases/FrameworkServices/MySQLService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from sys import exc_info
diff --git a/python.d/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
index 177332c1f..dd53fbc14 100644
--- a/python.d/python_modules/bases/FrameworkServices/SimpleService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -2,13 +2,12 @@
# Description:
# Author: Pawel Krupa (paulfantom)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from threading import Thread
+from time import sleep
-try:
- from time import sleep, monotonic as time
-except ImportError:
- from time import sleep, time
+from third_party.monotonic import monotonic
from bases.charts import Charts, ChartError, create_runtime_chart
from bases.collection import OldVersionCompatibility, safe_print
@@ -168,7 +167,7 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
'retries: {retries}'.format(freq=job.FREQ, retries=job.RETRIES_MAX - job.RETRIES))
while True:
- job.START_RUN = time()
+ job.START_RUN = monotonic()
job.NEXT_RUN = job.START_RUN - (job.START_RUN % job.FREQ) + job.FREQ + job.PENALTY
@@ -189,7 +188,7 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
if not self.manage_retries():
return
else:
- job.ELAPSED = int((time() - job.START_RUN) * 1e3)
+ job.ELAPSED = int((monotonic() - job.START_RUN) * 1e3)
job.PREV_UPDATE = job.START_RUN
job.RETRIES, job.PENALTY = 0, 0
safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
@@ -253,7 +252,7 @@ class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, objec
self.debug('sleeping for {sleep_time} to reach frequency of {freq} sec'.format(sleep_time=sleep_time,
freq=job.FREQ + job.PENALTY))
sleep(sleep_time)
- job.START_RUN = time()
+ job.START_RUN = monotonic()
def get_data(self):
return self._get_data()
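
Switching from time() to the bundled monotonic() (see the third_party/monotonic.py diff further down) keeps the scheduler's START_RUN/NEXT_RUN arithmetic immune to wall-clock jumps. A small sketch of the timing pattern used above:

    from time import sleep
    from third_party.monotonic import monotonic  # time.monotonic on Python 3.3+

    start = monotonic()
    sleep(0.1)                                   # stand-in for one collection cycle
    elapsed_ms = int((monotonic() - start) * 1e3)
    # Unlike time.time(), monotonic() never goes backwards, so elapsed_ms
    # cannot be negative after an NTP step or a manual clock change.
    print(elapsed_ms)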
diff --git a/python.d/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
index 8d27ae660..e85455307 100644
--- a/python.d/python_modules/bases/FrameworkServices/SocketService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -1,9 +1,18 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import socket
+try:
+ import ssl
+except ImportError:
+ _TLS_SUPPORT = False
+else:
+ _TLS_SUPPORT = True
+
from bases.FrameworkServices.SimpleService import SimpleService
@@ -16,6 +25,9 @@ class SocketService(SimpleService):
self.unix_socket = None
self.dgram_socket = False
self.request = ''
+ self.tls = False
+ self.cert = None
+ self.key = None
self.__socket_config = None
self.__empty_request = "".encode()
SimpleService.__init__(self, configuration=configuration, name=name)
@@ -26,7 +38,7 @@ class SocketService(SimpleService):
message=message))
else:
if self.__socket_config is not None:
- af, sock_type, proto, canon_name, sa = self.__socket_config
+ _, _, _, _, sa = self.__socket_config
self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0],
port=sa[1],
message=message))
@@ -44,7 +56,7 @@ class SocketService(SimpleService):
self.error("Cannot create socket to 'None':")
return False
- af, sock_type, proto, canon_name, sa = res
+ af, sock_type, proto, _, sa = res
try:
self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
self._sock = socket.socket(af, sock_type, proto)
@@ -56,10 +68,24 @@ class SocketService(SimpleService):
self.__socket_config = None
return False
+ if self.tls:
+ try:
+ self.debug('Encapsulating socket with TLS')
+ self._sock = ssl.wrap_socket(self._sock,
+ keyfile=self.key,
+ certfile=self.cert,
+ server_side=False,
+ cert_reqs=ssl.CERT_NONE)
+ except (socket.error, ssl.SSLError) as error:
+ self.error('Failed to wrap socket: {0}'.format(error))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
try:
self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
self._sock.connect(sa)
- except socket.error as error:
+ except (socket.error, ssl.SSLError) as error:
self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0],
port=sa[1],
error=error))
@@ -147,7 +173,7 @@ class SocketService(SimpleService):
pass
self._sock = None
- def _send(self):
+ def _send(self, request=None):
"""
Send request.
:return: boolean
@@ -155,8 +181,8 @@ class SocketService(SimpleService):
# Send request if it is needed
if self.request != self.__empty_request:
try:
- self.debug('sending request: {0}'.format(self.request))
- self._sock.send(self.request)
+ self.debug('sending request: {0}'.format(request or self.request))
+ self._sock.send(request or self.request)
except Exception as error:
self._socket_error('error sending request: {0}'.format(error))
self._disconnect()
@@ -197,7 +223,7 @@ class SocketService(SimpleService):
self.debug('final response: {0}'.format(data))
return data
- def _get_raw_data(self, raw=False):
+ def _get_raw_data(self, raw=False, request=None):
"""
Get raw data with low-level "socket" module.
:param raw: set `True` to return bytes
@@ -211,7 +237,7 @@ class SocketService(SimpleService):
return None
# Send request if it is needed
- if not self._send():
+ if not self._send(request):
return None
data = self._receive(raw)
@@ -249,6 +275,28 @@ class SocketService(SimpleService):
except (KeyError, TypeError):
self.debug('No port specified. Using: "{0}"'.format(self.port))
+ self.tls = bool(self.configuration.get('tls', self.tls))
+ if self.tls and not _TLS_SUPPORT:
+ self.warning('TLS requested but no TLS module found, disabling TLS support.')
+ self.tls = False
+ if _TLS_SUPPORT and not self.tls:
+ self.debug('No TLS preference specified, not using TLS.')
+
+ if self.tls and _TLS_SUPPORT:
+ self.key = self.configuration.get('tls_key_file')
+ self.cert = self.configuration.get('tls_cert_file')
+ if not self.cert:
+ # If there's no valid certificate, clear the key too.
+ self.debug('No valid TLS client certificate configuration found.')
+ self.key = None
+ self.cert = None
+ elif not self.key:
+ # If a key isn't listed, the config may still be
+ # valid, because there may be a key attached to the
+ # certificate.
+ self.info('No TLS client key specified, assuming it\'s attached to the certificate.')
+ self.key = None
+
try:
self.request = str(self.configuration['request'])
except (KeyError, TypeError):
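
For reference, the TLS-related keys read in _parse_config() above, shown as a hypothetical job configuration dict (all values are illustrative):

    # Only 'tls' enables wrapping; the cert file may embed its key, in which
    # case 'tls_key_file' can be omitted, as the code above explains.
    configuration = {
        'host': '127.0.0.1',
        'port': 6379,
        'tls': True,
        'tls_cert_file': '/etc/ssl/client.pem',
        'tls_key_file': None,
        'request': 'PING\r\n',
    }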
diff --git a/python.d/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
index bb340ba3b..856f38851 100644
--- a/python.d/python_modules/bases/FrameworkServices/UrlService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -2,6 +2,7 @@
# Description:
# Author: Pawel Krupa (paulfantom)
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import urllib3
@@ -22,8 +23,13 @@ class UrlService(SimpleService):
self.proxy_user = self.configuration.get('proxy_user')
self.proxy_password = self.configuration.get('proxy_pass')
self.proxy_url = self.configuration.get('proxy_url')
+ self.method = self.configuration.get('method', 'GET')
self.header = self.configuration.get('header')
self.request_timeout = self.configuration.get('timeout', 1)
+ self.tls_verify = self.configuration.get('tls_verify')
+ self.tls_ca_file = self.configuration.get('tls_ca_file')
+ self.tls_key_file = self.configuration.get('tls_key_file')
+ self.tls_cert_file = self.configuration.get('tls_cert_file')
self._manager = None
def __make_headers(self, **header_kw):
@@ -60,9 +66,21 @@ class UrlService(SimpleService):
else:
manager = urllib3.PoolManager
params = dict(headers=header)
+ tls_cert_file = self.tls_cert_file
+ if tls_cert_file:
+ params['cert_file'] = tls_cert_file
+ # NOTE: key_file is useless without cert_file, but
+ # cert_file may include the key as well.
+ tls_key_file = self.tls_key_file
+ if tls_key_file:
+ params['key_file'] = tls_key_file
+ tls_ca_file = self.tls_ca_file
+ if tls_ca_file:
+ params['ca_certs'] = tls_ca_file
try:
url = header_kw.get('url') or self.url
- if url.startswith('https'):
+ if url.startswith('https') and not self.tls_verify and not tls_ca_file:
+ params['ca_certs'] = None
return manager(assert_hostname=False, cert_reqs='CERT_NONE', **params)
return manager(**params)
except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error:
@@ -77,13 +95,13 @@ class UrlService(SimpleService):
try:
status, data = self._get_raw_data_with_status(url, manager)
except (urllib3.exceptions.HTTPError, TypeError, AttributeError) as error:
- self.error('Url: {url}. Error: {error}'.format(url=url, error=error))
+ self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error))
return None
if status == 200:
- return data.decode()
+ return data
else:
- self.debug('Url: {url}. Http response status code: {code}'.format(url=url, code=status))
+ self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=status))
return None
def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True):
@@ -93,13 +111,15 @@ class UrlService(SimpleService):
"""
url = url or self.url
manager = manager or self._manager
- response = manager.request(method='GET',
+ response = manager.request(method=self.method,
url=url,
timeout=self.request_timeout,
retries=retries,
headers=manager.headers,
redirect=redirect)
- return response.status, response.data
+ if isinstance(response.data, str):
+ return response.status, response.data
+ return response.status, response.data.decode()
def check(self):
"""
diff --git a/python.d/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/urllib3/contrib/__init__.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py
+++ b/collectors/python.d.plugin/python_modules/bases/__init__.py
diff --git a/python.d/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
index 5394fbf64..2963739ec 100644
--- a/python.d/python_modules/bases/charts.py
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.collection import safe_print
-CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type']
+CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden']
DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
VARIABLE_PARAMS = ['id', 'value']
@@ -13,9 +14,9 @@ DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row',
CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} '' 'python.d.plugin' '{module_name}'\n"
+ "{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} 'obsolete'\n"
+ "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden}'\n"
@@ -151,6 +152,8 @@ class Chart:
id=self.params['id'])
if self.params.get('chart_type') not in CHART_TYPES:
self.params['chart_type'] = 'absolute'
+ hidden = str(self.params.get('hidden', ''))
+ self.params['hidden'] = 'hidden' if hidden == 'hidden' else ''
self.dimensions = list()
self.variables = set()
@@ -304,6 +307,12 @@ class Dimension:
return self.id == other
return self.id == other.id
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
def create(self):
return DIMENSION_CREATE.format(**self.params)
@@ -360,6 +369,9 @@ class ChartVariable:
return self.id == other.id
return False
+ def __ne__(self, other):
+ return not self == other
+
def __hash__(self):
return hash(repr(self))
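
The new chart-level 'hidden' flag threads through CHART_CREATE and CHART_OBSOLETE above; any value other than the literal string 'hidden' is normalized to ''. A sketch of the resulting CHART line (parameter values are illustrative):

    from bases.charts import CHART_CREATE

    params = {
        'type': 'example', 'id': 'random', 'name': '', 'title': 'A random number',
        'units': 'number', 'family': 'random', 'context': 'example.random',
        'chart_type': 'line', 'hidden': 'hidden',
    }
    print(CHART_CREATE.format(priority=60000, update_every=1,
                              module_name='example', **params))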
diff --git a/python.d/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
index e03b4f58e..479a3b610 100644
--- a/python.d/python_modules/bases/collection.py
+++ b/collectors/python.d.plugin/python_modules/bases/collection.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
diff --git a/python.d/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py
index d18b9dcd0..9eb268ce7 100644
--- a/python.d/python_modules/bases/loaders.py
+++ b/collectors/python.d.plugin/python_modules/bases/loaders.py
@@ -1,18 +1,27 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import types
+
from sys import version_info
PY_VERSION = version_info[:2]
+try:
+ if PY_VERSION > (3, 1):
+ from pyyaml3 import SafeLoader as YamlSafeLoader
+ else:
+ from pyyaml2 import SafeLoader as YamlSafeLoader
+except ImportError:
+ from yaml import SafeLoader as YamlSafeLoader
+
+
if PY_VERSION > (3, 1):
- from pyyaml3 import SafeLoader as YamlSafeLoader
from importlib.machinery import SourceFileLoader
DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
else:
- from pyyaml2 import SafeLoader as YamlSafeLoader
from imp import load_source as SourceFileLoader
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
@@ -26,6 +35,14 @@ def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
+def safe_load(stream):
+ loader = YamlSafeLoader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+
YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor)
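
The new safe_load() wraps whichever loader was selected above and disposes of it afterwards; combined with dict_constructor, mappings come back as OrderedDict. A quick usage sketch (the YAML content is illustrative):

    from bases.loaders import safe_load

    config = safe_load('local:\n  host: 127.0.0.1\n  port: 8125\n')
    print(type(config))             # OrderedDict, thanks to dict_constructor
    print(config['local']['port'])  # 8125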
diff --git a/python.d/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
index fc40b83d3..39be77a79 100644
--- a/python.d/python_modules/bases/loggers.py
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description:
# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
import logging
import traceback
diff --git a/python.d/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
index 76e19e13f..4d560e438 100644
--- a/python.d/python_modules/pyyaml2/__init__.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from error import *
diff --git a/python.d/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
index 06e5ac782..6b41b8067 100644
--- a/python.d/python_modules/pyyaml2/composer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Composer', 'ComposerError']
diff --git a/python.d/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
index 635faac3e..8ad1b90a7 100644
--- a/python.d/python_modules/pyyaml2/constructor.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError']
diff --git a/python.d/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
index 68dcd7519..2858ab479 100644
--- a/python.d/python_modules/pyyaml2/cyaml.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
diff --git a/python.d/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
index f811d2c91..3685cbeeb 100644
--- a/python.d/python_modules/pyyaml2/dumper.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
diff --git a/python.d/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
index e5bcdcccb..9a460a0fd 100644
--- a/python.d/python_modules/pyyaml2/emitter.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
diff --git a/python.d/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
index 577686db5..5466be721 100644
--- a/python.d/python_modules/pyyaml2/error.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
diff --git a/python.d/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
index f79ad389c..283452add 100644
--- a/python.d/python_modules/pyyaml2/events.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Abstract classes.
diff --git a/python.d/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
index 293ff467b..1c195531f 100644
--- a/python.d/python_modules/pyyaml2/loader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
diff --git a/python.d/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
index c4f070c41..ed2a1b43e 100644
--- a/python.d/python_modules/pyyaml2/nodes.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
class Node(object):
def __init__(self, tag, value, start_mark, end_mark):
diff --git a/python.d/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
index f9e3057f3..97ba08337 100644
--- a/python.d/python_modules/pyyaml2/parser.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
diff --git a/python.d/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
index 3249e6b9f..8d422954e 100644
--- a/python.d/python_modules/pyyaml2/reader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# This module contains abstractions for the input stream. You don't have to
# look further, there is no pretty code.
#
diff --git a/python.d/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
index 5f4fc70db..0a1404eca 100644
--- a/python.d/python_modules/pyyaml2/representer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
diff --git a/python.d/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
index 6b5ab8759..49922debf 100644
--- a/python.d/python_modules/pyyaml2/resolver.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseResolver', 'Resolver']
diff --git a/python.d/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
index 5228fad65..971da6127 100644
--- a/python.d/python_modules/pyyaml2/scanner.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Scanner produces tokens of the following types:
# STREAM-START
diff --git a/python.d/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
index 0bf1e96dc..15fdbb0c0 100644
--- a/python.d/python_modules/pyyaml2/serializer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Serializer', 'SerializerError']
diff --git a/python.d/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
index 4d0b48a39..c5c4fb116 100644
--- a/python.d/python_modules/pyyaml2/tokens.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
class Token(object):
def __init__(self, start_mark, end_mark):
diff --git a/python.d/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
index a5e20f94d..a884b33cf 100644
--- a/python.d/python_modules/pyyaml3/__init__.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from .error import *
diff --git a/python.d/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
index d5c6a7acd..c418bba91 100644
--- a/python.d/python_modules/pyyaml3/composer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Composer', 'ComposerError']
diff --git a/python.d/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
index 981543aeb..ee09a7a7e 100644
--- a/python.d/python_modules/pyyaml3/constructor.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError']
diff --git a/python.d/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
index d5cb87e99..e6c16d894 100644
--- a/python.d/python_modules/pyyaml3/cyaml.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
diff --git a/python.d/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
index 0b6912877..ba590c6e6 100644
--- a/python.d/python_modules/pyyaml3/dumper.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
diff --git a/python.d/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
index 34cb145a5..d4be65a8e 100644
--- a/python.d/python_modules/pyyaml3/emitter.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
diff --git a/python.d/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
index b796b4dc5..5fec7d449 100644
--- a/python.d/python_modules/pyyaml3/error.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
diff --git a/python.d/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
index f79ad389c..283452add 100644
--- a/python.d/python_modules/pyyaml3/events.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Abstract classes.
diff --git a/python.d/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
index 08c8f01b3..7ef6cf815 100644
--- a/python.d/python_modules/pyyaml3/loader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
diff --git a/python.d/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
index c4f070c41..ed2a1b43e 100644
--- a/python.d/python_modules/pyyaml3/nodes.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
class Node(object):
def __init__(self, tag, value, start_mark, end_mark):
diff --git a/python.d/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
index 13a5995d2..bcec7f994 100644
--- a/python.d/python_modules/pyyaml3/parser.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
diff --git a/python.d/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
index f70e920f4..0a515fd64 100644
--- a/python.d/python_modules/pyyaml3/reader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# This module contains abstractions for the input stream. You don't have to
# look further, there is no pretty code.
#
diff --git a/python.d/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
index 67cd6fd25..756a18dcc 100644
--- a/python.d/python_modules/pyyaml3/representer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
diff --git a/python.d/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
index 0eece2582..50945e04d 100644
--- a/python.d/python_modules/pyyaml3/resolver.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['BaseResolver', 'Resolver']
diff --git a/python.d/python_modules/pyyaml3/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
index 494d975ba..b55854e8b 100644
--- a/python.d/python_modules/pyyaml3/scanner.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Scanner produces tokens of the following types:
# STREAM-START
diff --git a/python.d/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
index fe911e67a..1ba2f7f9d 100644
--- a/python.d/python_modules/pyyaml3/serializer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
__all__ = ['Serializer', 'SerializerError']
diff --git a/python.d/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
index 4d0b48a39..c5c4fb116 100644
--- a/python.d/python_modules/pyyaml3/tokens.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
class Token(object):
def __init__(self, start_mark, end_mark):
diff --git a/python.d/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py
index e69de29bb..e69de29bb 100644
--- a/python.d/python_modules/urllib3/packages/backports/__init__.py
+++ b/collectors/python.d.plugin/python_modules/third_party/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
new file mode 100644
index 000000000..ec21779a0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
@@ -0,0 +1,515 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# client.py - Somewhat higher-level GUI_RPC API for BOINC core client
+#
+# Copyright (C) 2013 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
+# Copyright (C) 2017 Austin S. Hemmelgarn
+#
+# SPDX-License-Identifier: GPL-3.0
+
+# Based on client/boinc_cmd.cpp
+
+import hashlib
+import socket
+import sys
+import time
+from functools import total_ordering
+from xml.etree import ElementTree
+
+GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg"
+
+GUI_RPC_HOSTNAME = None # localhost
+GUI_RPC_PORT = 31416
+GUI_RPC_TIMEOUT = 1
+
+class Rpc(object):
+ ''' Class to perform GUI RPC calls to a BOINC core client.
+ Usage in a context manager ('with' block) is recommended to ensure
+ disconnect() is called. Using the same instance for all calls is also
+ recommended so it reuses the same socket connection.
+ '''
+ def __init__(self, hostname="", port=0, timeout=0, text_output=False):
+ self.hostname = hostname
+ self.port = port
+ self.timeout = timeout
+ self.sock = None
+ self.text_output = text_output
+
+ @property
+ def sockargs(self):
+ return (self.hostname, self.port, self.timeout)
+
+ def __enter__(self): self.connect(*self.sockargs); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self, hostname="", port=0, timeout=0):
+ ''' Connect to (hostname, port) with timeout in seconds.
+ Hostname defaults to None (localhost), and port to 31416.
+ Calling multiple times will disconnect previous connection (if any),
+ and (re-)connect to host.
+ '''
+ if self.sock:
+ self.disconnect()
+
+ self.hostname = hostname or GUI_RPC_HOSTNAME
+ self.port = port or GUI_RPC_PORT
+ self.timeout = timeout or GUI_RPC_TIMEOUT
+
+ self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2])
+
+ def disconnect(self):
+ ''' Disconnect from host. Calling multiple times is OK (idempotent)
+ '''
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+
+ def call(self, request, text_output=None):
+ ''' Do an RPC call. Pack and send the XML request and return the
+ unpacked reply. request can be either plain XML text or a
+ xml.etree.ElementTree.Element object. Return ElementTree.Element
+ or XML text according to text_output flag.
+ Will auto-connect if not connected.
+ '''
+ if text_output is None:
+ text_output = self.text_output
+
+ if not self.sock:
+ self.connect(*self.sockargs)
+
+ if not isinstance(request, ElementTree.Element):
+ request = ElementTree.fromstring(request)
+
+ # pack request
+ end = '\003'
+ if sys.version_info[0] < 3:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end)
+ else:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode()
+
+ try:
+ self.sock.sendall(req)
+ except (socket.error, socket.herror, socket.gaierror, socket.timeout):
+ raise
+
+ req = ""
+ while True:
+ try:
+ buf = self.sock.recv(8192)
+ if not buf:
+ raise socket.error("No data from socket")
+ if sys.version_info[0] >= 3:
+ buf = buf.decode()
+ except socket.error:
+ raise
+ n = buf.find(end)
+ if not n == -1: break
+ req += buf
+ req += buf[:n]
+
+ # unpack reply (remove root tag, ie: first and last lines)
+ req = '\n'.join(req.strip().rsplit('\n')[1:-1])
+
+ if text_output:
+ return req
+ else:
+ return ElementTree.fromstring(req)
+
+def setattrs_from_xml(obj, xml, attrfuncdict={}):
+ ''' Helper to set values for attributes of a class instance by mapping
+ matching tags from a XML file.
+ attrfuncdict is a dict of functions to customize value data type of
+ each attribute. It falls back to simple int/float/bool/str detection
+ based on values defined in __init__(). This would not be needed if
+ Boinc used standard RPC protocol, which includes data type in XML.
+ '''
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+ for e in list(xml):
+ if hasattr(obj, e.tag):
+ attr = getattr(obj, e.tag)
+ attrfunc = attrfuncdict.get(e.tag, None)
+ if attrfunc is None:
+ if isinstance(attr, bool): attrfunc = parse_bool
+ elif isinstance(attr, int): attrfunc = parse_int
+ elif isinstance(attr, float): attrfunc = parse_float
+ elif isinstance(attr, str): attrfunc = parse_str
+ elif isinstance(attr, list): attrfunc = parse_list
+ else: attrfunc = lambda x: x
+ setattr(obj, e.tag, attrfunc(e))
+ else:
+ pass
+ #print "class missing attribute '%s': %r" % (e.tag, obj)
+ return obj
+
+
+def parse_bool(e):
+ ''' Helper to convert ElementTree.Element.text to boolean.
+ Treat '<foo/>' (and '<foo>[[:blank:]]</foo>') as True
+ Treat '0' and 'false' as False
+ '''
+ if e.text is None:
+ return True
+ else:
+ return bool(e.text) and not e.text.strip().lower() in ('0', 'false')
+
+
+def parse_int(e):
+ ''' Helper to convert ElementTree.Element.text to integer.
+ Treat '<foo/>' (and '<foo></foo>') as 0
+ '''
+ # int(float()) allows casting to int a value expressed as float in XML
+ return 0 if e.text is None else int(float(e.text.strip()))
+
+
+def parse_float(e):
+ ''' Helper to convert ElementTree.Element.text to float. '''
+ return 0.0 if e.text is None else float(e.text.strip())
+
+
+def parse_str(e):
+ ''' Helper to convert ElementTree.Element.text to string. '''
+ return "" if e.text is None else e.text.strip()
+
+
+def parse_list(e):
+ ''' Helper to convert ElementTree.Element to list. For now, simply return
+ the list of root element's children
+ '''
+ return list(e)
+
+
+class Enum(object):
+ UNKNOWN = -1 # Not in original API
+
+ @classmethod
+ def name(cls, value):
+ ''' Quick-and-dirty fallback for getting the "name" of an enum item '''
+
+ # value as string, if it matches an enum attribute.
+ # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE)
+ if hasattr(cls, str(value)):
+ return cls.name(getattr(cls, value, None))
+
+ # value not handled in subclass name()
+ for k, v in cls.__dict__.items():
+ if v == value:
+ return k.lower().replace('_', ' ')
+
+ # value not found
+ return cls.name(Enum.UNKNOWN)
+
+
+class CpuSched(Enum):
+ ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state
+ "SCHEDULED" is synonymous with "executing" except when CPU throttling
+ is in use.
+ '''
+ UNINITIALIZED = 0
+ PREEMPTED = 1
+ SCHEDULED = 2
+
+
+class ResultState(Enum):
+ ''' Values of RESULT::state in client.
+ THESE MUST BE IN NUMERICAL ORDER
+ (because of the > comparison in RESULT::computing_done())
+ see html/inc/common_defs.inc
+ '''
+ NEW = 0
+ #// New result
+ FILES_DOWNLOADING = 1
+ #// Input files for result (WU, app version) are being downloaded
+ FILES_DOWNLOADED = 2
+ #// Files are downloaded, result can be (or is being) computed
+ COMPUTE_ERROR = 3
+ #// computation failed; no file upload
+ FILES_UPLOADING = 4
+ #// Output files for result are being uploaded
+ FILES_UPLOADED = 5
+ #// Files are uploaded, notify scheduling server at some point
+ ABORTED = 6
+ #// result was aborted
+ UPLOAD_FAILED = 7
+ #// some output file permanent failure
+
+
+class Process(Enum):
+ ''' values of ACTIVE_TASK::task_state '''
+ UNINITIALIZED = 0
+ #// process doesn't exist yet
+ EXECUTING = 1
+ #// process is running, as far as we know
+ SUSPENDED = 9
+ #// we've sent it a "suspend" message
+ ABORT_PENDING = 5
+ #// process exceeded limits; send "abort" message, waiting to exit
+ QUIT_PENDING = 8
+ #// we've sent it a "quit" message, waiting to exit
+ COPY_PENDING = 10
+ #// waiting for async file copies to finish
+
+
+class _Struct(object):
+ ''' base helper class with common methods for all classes derived from
+ BOINC's C++ structs
+ '''
+ @classmethod
+ def parse(cls, xml):
+ return setattrs_from_xml(cls(), xml)
+
+ def __str__(self, indent=0):
+ buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ buf += '{0}\t{1} [\n'.format('\t' * indent, attr)
+ for v in value: buf += '\t\t{0}\t\t,\n'.format(v)
+ buf += '\t]\n'
+ else:
+ buf += '{0}\t{1}\t{2}\n'.format('\t' * indent,
+ attr,
+ value.__str__(indent+2)
+ if isinstance(value, _Struct)
+ else repr(value))
+ return buf
+
+
+@total_ordering
+class VersionInfo(_Struct):
+ def __init__(self, major=0, minor=0, release=0):
+ self.major = major
+ self.minor = minor
+ self.release = release
+
+ @property
+ def _tuple(self):
+ return (self.major, self.minor, self.release)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self._tuple == other._tuple
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __gt__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self._tuple > other._tuple
+
+ def __str__(self):
+ return "{0}.{1}.{2}".format(self.major, self.minor, self.release)
+
+ def __repr__(self):
+ return "{0}{1}".format(self.__class__.__name__, self._tuple)
+
+
+class Result(_Struct):
+ ''' Also called "task" in some contexts '''
+ def __init__(self):
+ # Names and values follow lib/gui_rpc_client.h @ RESULT
+ # Order too, except where grouping contradicts client/result.cpp
+ # RESULT::write_gui(); there, XML order is used.
+
+ self.name = ""
+ self.wu_name = ""
+ self.version_num = 0
+ #// identifies the app used
+ self.plan_class = ""
+ self.project_url = "" # from PROJECT.master_url
+ self.report_deadline = 0.0 # seconds since epoch
+ self.received_time = 0.0 # seconds since epoch
+ #// when we got this from server
+ self.ready_to_report = False
+ #// we're ready to report this result to the server;
+ #// either computation is done and all the files have been uploaded
+ #// or there was an error
+ self.got_server_ack = False
+ #// we've received the ack for this result from the server
+ self.final_cpu_time = 0.0
+ self.final_elapsed_time = 0.0
+ self.state = ResultState.NEW
+ self.estimated_cpu_time_remaining = 0.0
+ #// actually, estimated elapsed time remaining
+ self.exit_status = 0
+ #// return value from the application
+ self.suspended_via_gui = False
+ self.project_suspended_via_gui = False
+ self.edf_scheduled = False
+ #// temporary used to tell GUI that this result is deadline-scheduled
+ self.coproc_missing = False
+ #// a coproc needed by this job is missing
+ #// (e.g. because user removed their GPU board).
+ self.scheduler_wait = False
+ self.scheduler_wait_reason = ""
+ self.network_wait = False
+ self.resources = ""
+ #// textual description of resources used
+
+ #// the following defined if active
+ # XML is generated in client/app.cpp ACTIVE_TASK::write_gui()
+ self.active_task = False
+ self.active_task_state = Process.UNINITIALIZED
+ self.app_version_num = 0
+ self.slot = -1
+ self.pid = 0
+ self.scheduler_state = CpuSched.UNINITIALIZED
+ self.checkpoint_cpu_time = 0.0
+ self.current_cpu_time = 0.0
+ self.fraction_done = 0.0
+ self.elapsed_time = 0.0
+ self.swap_size = 0
+ self.working_set_size_smoothed = 0.0
+ self.too_large = False
+ self.needs_shmem = False
+ self.graphics_exec_path = ""
+ self.web_graphics_url = ""
+ self.remote_desktop_addr = ""
+ self.slot_path = ""
+ #// only present if graphics_exec_path is
+
+ # The following are not in original API, but are present in RPC XML reply
+ self.completed_time = 0.0
+ #// time when ready_to_report was set
+ self.report_immediately = False
+ self.working_set_size = 0
+ self.page_fault_rate = 0.0
+ #// derived by higher-level code
+
+ # The following are in API, but are NEVER in RPC XML reply. Go figure
+ self.signal = 0
+
+ self.app = None # APP*
+ self.wup = None # WORKUNIT*
+ self.project = None # PROJECT*
+ self.avp = None # APP_VERSION*
+
+ @classmethod
+ def parse(cls, xml):
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+
+ # parse main XML
+ result = super(Result, cls).parse(xml)
+
+ # parse '<active_task>' children
+ active_task = xml.find('active_task')
+ if active_task is None:
+ result.active_task = False # already the default after __init__()
+ else:
+ result.active_task = True # already the default after main parse
+ result = setattrs_from_xml(result, active_task)
+
+ #// if CPU time is nonzero but elapsed time is zero,
+ #// we must be talking to an old client.
+ #// Set elapsed = CPU
+ #// (easier to deal with this here than in the manager)
+ if result.current_cpu_time != 0 and result.elapsed_time == 0:
+ result.elapsed_time = result.current_cpu_time
+
+ if result.final_cpu_time != 0 and result.final_elapsed_time == 0:
+ result.final_elapsed_time = result.final_cpu_time
+
+ return result
+
+ def __str__(self):
+ buf = '{0}:\n'.format(self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if attr in ['received_time', 'report_deadline']:
+ value = time.ctime(value)
+ buf += '\t{0}\t{1}\n'.format(attr, value)
+ return buf
+
+
+class BoincClient(object):
+
+ def __init__(self, host="", port=0, passwd=None):
+ self.hostname = host
+ self.port = port
+ self.passwd = passwd
+ self.rpc = Rpc(text_output=False)
+ self.version = None
+ self.authorized = False
+
+ # Informative, not authoritative. Records status of *last* RPC call,
+ # but does not infer success about the *next* one.
+ # Thus, it should be read *after* an RPC call, not prior to one
+ self.connected = False
+
+ def __enter__(self): self.connect(); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self):
+ try:
+ self.rpc.connect(self.hostname, self.port)
+ self.connected = True
+ except socket.error:
+ self.connected = False
+ return
+ self.authorized = self.authorize(self.passwd)
+ self.version = self.exchange_versions()
+
+ def disconnect(self):
+ self.rpc.disconnect()
+
+ def authorize(self, password):
+ ''' Request authorization. If password is None and we are connecting
+ to localhost, try to read password from the local config file
+ GUI_RPC_PASSWD_FILE. If the file can't be read (not found or no
+ permission to read), try to authorize with a blank password.
+ If authorization is requested and fails, all subsequent calls
+ will be refused with socket.error 'Connection reset by peer' (104).
+ Since most local calls do not require authorization, do not attempt
+ it if you're not sure about the password.
+ '''
+ if password is None and not self.hostname:
+ password = read_gui_rpc_password() or ""
+ nonce = self.rpc.call('<auth1/>').text
+ authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower()
+ reply = self.rpc.call('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(authhash))
+
+ if reply.tag == 'authorized':
+ return True
+ else:
+ return False
+
+ def exchange_versions(self):
+ ''' Return VersionInfo instance with core client version info '''
+ return VersionInfo.parse(self.rpc.call('<exchange_versions/>'))
+
+ def get_tasks(self):
+ ''' Same as get_results(active_only=False) '''
+ return self.get_results(False)
+
+ def get_results(self, active_only=False):
+ ''' Get a list of results.
+ Those that are in progress will have information such as CPU time
+ and fraction done. Each result includes a name;
+ use CC_STATE::lookup_result() to find this result in the current static state;
+ if it's not there, call get_state() again.
+ '''
+ reply = self.rpc.call("<get_results><active_only>{0}</active_only></get_results>".format(1 if active_only else 0))
+ if not reply.tag == 'results':
+ return []
+
+ results = []
+ for item in list(reply):
+ results.append(Result.parse(item))
+
+ return results
+
+
+def read_gui_rpc_password():
+ ''' Read the password string from GUI_RPC_PASSWD_FILE, trim the trailing
+ newline (if any), and return it
+ '''
+ try:
+ with open(GUI_RPC_PASSWD_FILE, 'r') as f:
+ buf = f.read()
+ if buf.endswith('\n'): return buf[:-1] # trim trailing newline
+ else: return buf
+ except IOError:
+ # Permission denied or File not found.
+ pass
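
A hypothetical use of the client added above against a local BOINC core client (with no arguments it targets localhost:31416 and reads the password from GUI_RPC_PASSWD_FILE):

    from third_party.boinc_client import BoincClient

    with BoincClient() as client:
        if client.connected and client.authorized:
            for task in client.get_tasks():
                print(task.name, task.fraction_done)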
diff --git a/python.d/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
index 1d868f0e2..f10cd6209 100644
--- a/python.d/python_modules/third_party/lm_sensors.py
+++ b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: LGPL-2.1
"""
@package sensors.py
Python Bindings for libsensors3
@@ -254,4 +255,4 @@ class SubFeatureIterator:
return subfeature
 def next(self): # python2 compatibility
- return self.__next__() \ No newline at end of file
+ return self.__next__()
diff --git a/collectors/python.d.plugin/python_modules/third_party/mcrcon.py b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
new file mode 100644
index 000000000..a65a304b6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
@@ -0,0 +1,74 @@
+# Minecraft Remote Console module.
+#
+# Copyright (C) 2015 Barnaby Gale
+#
+# SPDX-License-Identifier: MIT
+
+import socket
+import select
+import struct
+import time
+
+
+class MCRconException(Exception):
+ pass
+
+
+class MCRcon(object):
+ socket = None
+
+ def connect(self, host, port, password):
+ if self.socket is not None:
+ raise MCRconException("Already connected")
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.socket.settimeout(0.9)
+ self.socket.connect((host, port))
+ self.send(3, password)
+
+ def disconnect(self):
+ if self.socket is None:
+ raise MCRconException("Already disconnected")
+ self.socket.close()
+ self.socket = None
+
+ def read(self, length):
+ data = b""
+ while len(data) < length:
+ data += self.socket.recv(length - len(data))
+ return data
+
+ def send(self, out_type, out_data):
+ if self.socket is None:
+ raise MCRconException("Must connect before sending data")
+
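+        # RCON packet layout, all integers little-endian:
+        #   <int32 length><int32 request id><int32 type><payload>\x00\x00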
+ # Send a request packet
+ out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
+ out_length = struct.pack('<i', len(out_payload))
+ self.socket.send(out_length + out_payload)
+
+ # Read response packets
+ in_data = ""
+ while True:
+ # Read a packet
+ in_length, = struct.unpack('<i', self.read(4))
+ in_payload = self.read(in_length)
+            in_id, in_type = struct.unpack('<ii', in_payload[:8])  # request id, packet type
+ in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
+
+ # Sanity checks
+ if in_padding != b'\x00\x00':
+ raise MCRconException("Incorrect padding")
+ if in_id == -1:
+ raise MCRconException("Login failed")
+
+ # Record the response
+ in_data += in_data_partial.decode('utf8')
+
+ # If there's nothing more to receive, return the response
+ if len(select.select([self.socket], [], [], 0)[0]) == 0:
+ return in_data
+
+ def command(self, command):
+ result = self.send(2, command)
+ time.sleep(0.003) # MC-72390 workaround
+ return result
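+
+
+# A minimal usage sketch (illustrative, not part of the upstream module):
+# host, port and password are placeholder assumptions for a server started
+# with rcon enabled in server.properties.
+if __name__ == '__main__':
+    rcon = MCRcon()
+    rcon.connect('127.0.0.1', 25575, 'example-password')
+    try:
+        print(rcon.command('list'))  # ask the server for the online players
+    finally:
+        rcon.disconnect()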
diff --git a/collectors/python.d.plugin/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
new file mode 100644
index 000000000..da04bb857
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: Apache-2.0
+"""
+ monotonic
+ ~~~~~~~~~
+
+ This module provides a ``monotonic()`` function which returns the
+ value (in fractional seconds) of a clock which never goes backwards.
+
+ On Python 3.3 or newer, ``monotonic`` will be an alias of
+ ``time.monotonic`` from the standard library. On older versions,
+ it will fall back to an equivalent implementation:
+
+ +-------------+----------------------------------------+
+ | Linux, BSD | ``clock_gettime(3)`` |
+ +-------------+----------------------------------------+
+ | Windows | ``GetTickCount`` or ``GetTickCount64`` |
+ +-------------+----------------------------------------+
+ | OS X | ``mach_absolute_time`` |
+ +-------------+----------------------------------------+
+
+ If no suitable implementation exists for the current platform,
+ attempting to import this module (or to import from it) will
+ cause a ``RuntimeError`` exception to be raised.
+
+
+ Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+"""
+import time
+
+
+__all__ = ('monotonic',)
+
+
+try:
+ monotonic = time.monotonic
+except AttributeError:
+ import ctypes
+ import ctypes.util
+ import os
+ import sys
+ import threading
+ try:
+ if sys.platform == 'darwin': # OS X, iOS
+ # See Technical Q&A QA1398 of the Mac Developer Library:
+ # <https://developer.apple.com/library/mac/qa/qa1398/>
+ libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
+
+ class mach_timebase_info_data_t(ctypes.Structure):
+ """System timebase info. Defined in <mach/mach_time.h>."""
+ _fields_ = (('numer', ctypes.c_uint32),
+ ('denom', ctypes.c_uint32))
+
+ mach_absolute_time = libc.mach_absolute_time
+ mach_absolute_time.restype = ctypes.c_uint64
+
+ timebase = mach_timebase_info_data_t()
+ libc.mach_timebase_info(ctypes.byref(timebase))
+ ticks_per_second = timebase.numer / timebase.denom * 1.0e9
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return mach_absolute_time() / ticks_per_second
+
+ elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
+ if sys.platform.startswith('cygwin'):
+ # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
+ # version 1.7.6. Using raw WinAPI for maximum version compatibility.
+
+ # Ugly hack using the wrong calling convention (in 32-bit mode)
+ # because ctypes has no windll under cygwin (and it also seems that
+ # the code letting you select stdcall in _ctypes doesn't exist under
+ # the preprocessor definitions relevant to cygwin).
+ # This is 'safe' because:
+ # 1. The ABI of GetTickCount and GetTickCount64 is identical for
+ # both calling conventions because they both have no parameters.
+ # 2. libffi masks the problem because after making the call it doesn't
+ # touch anything through esp and epilogue code restores a correct
+ # esp from ebp afterwards.
+ try:
+ kernel32 = ctypes.cdll.kernel32
+ except OSError: # 'No such file or directory'
+ kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
+ else:
+ kernel32 = ctypes.windll.kernel32
+
+ GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
+ if GetTickCount64:
+ # Windows Vista / Windows Server 2008 or newer.
+ GetTickCount64.restype = ctypes.c_ulonglong
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return GetTickCount64() / 1000.0
+
+ else:
+ # Before Windows Vista.
+ GetTickCount = kernel32.GetTickCount
+ GetTickCount.restype = ctypes.c_uint32
+
+ get_tick_count_lock = threading.Lock()
+ get_tick_count_last_sample = 0
+ get_tick_count_wraparounds = 0
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ global get_tick_count_last_sample
+ global get_tick_count_wraparounds
+
+ with get_tick_count_lock:
+ current_sample = GetTickCount()
+ if current_sample < get_tick_count_last_sample:
+ get_tick_count_wraparounds += 1
+ get_tick_count_last_sample = current_sample
+
+ final_milliseconds = get_tick_count_wraparounds << 32
+ final_milliseconds += get_tick_count_last_sample
+ return final_milliseconds / 1000.0
+
+ else:
+ try:
+ clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
+ use_errno=True).clock_gettime
+ except Exception:
+ clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
+ use_errno=True).clock_gettime
+
+ class timespec(ctypes.Structure):
+ """Time specification, as described in clock_gettime(3)."""
+ _fields_ = (('tv_sec', ctypes.c_long),
+ ('tv_nsec', ctypes.c_long))
+
+ if sys.platform.startswith('linux'):
+ CLOCK_MONOTONIC = 1
+ elif sys.platform.startswith('freebsd'):
+ CLOCK_MONOTONIC = 4
+ elif sys.platform.startswith('sunos5'):
+ CLOCK_MONOTONIC = 4
+ elif 'bsd' in sys.platform:
+ CLOCK_MONOTONIC = 3
+ elif sys.platform.startswith('aix'):
+ CLOCK_MONOTONIC = ctypes.c_longlong(10)
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ ts = timespec()
+ if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
+ errno = ctypes.get_errno()
+ raise OSError(errno, os.strerror(errno))
+ return ts.tv_sec + ts.tv_nsec / 1.0e9
+
+ # Perform a sanity-check.
+ if monotonic() - monotonic() > 0:
+ raise ValueError('monotonic() is not monotonic!')
+
+ except Exception as e:
+ raise RuntimeError('no suitable implementation for this system: ' + repr(e))
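+
+
+# A minimal usage sketch (illustrative, not part of the upstream module):
+# time an interval with a clock that is immune to system clock adjustments.
+if __name__ == '__main__':
+    start = monotonic()
+    sum(range(1000000))  # some work to measure
+    print('elapsed: {0:.6f}s'.format(monotonic() - start))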
diff --git a/python.d/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
index d0b97d47c..589401b8f 100644
--- a/python.d/python_modules/third_party/ordereddict.py
+++ b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
@@ -1,24 +1,6 @@
# Copyright (c) 2009 Raymond Hettinger
#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction,
-# including without limitation the rights to use, copy, modify, merge,
-# publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
+# SPDX-License-Identifier: MIT
from UserDict import DictMixin
diff --git a/python.d/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
index 26493ecb9..3add84816 100644
--- a/python.d/python_modules/urllib3/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
diff --git a/python.d/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
index 4849ddecd..c1d2fad36 100644
--- a/python.d/python_modules/urllib3/_collections.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from collections import Mapping, MutableMapping
try:
diff --git a/python.d/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py
index c0d832998..f757493c7 100644
--- a/python.d/python_modules/urllib3/connection.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/connection.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import datetime
import logging
diff --git a/python.d/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
index b4f1166a6..90e4c86a5 100644
--- a/python.d/python_modules/urllib3/connectionpool.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import errno
import logging
diff --git a/src/.keep b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
index e69de29bb..e69de29bb 100644
--- a/src/.keep
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
index e26b84086..bb826673f 100644
--- a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
This module uses ctypes to bind a whole bunch of functions and constants from
SecureTransport. The goal here is to provide the low-level API to
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
index 5e3494bce..0f79a1372 100644
--- a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
Low-level helpers for the SecureTransport bindings.
diff --git a/python.d/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
index 814b0222d..e74589fa8 100644
--- a/python.d/python_modules/urllib3/contrib/appengine.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
diff --git a/python.d/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
index 642e99ed2..3f8c9ebf5 100644
--- a/python.d/python_modules/urllib3/contrib/ntlmpool.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
NTLM authenticating pool, contributed by erikcederstran
diff --git a/python.d/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
index 6645dbaa9..8d373507d 100644
--- a/python.d/python_modules/urllib3/contrib/pyopenssl.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
diff --git a/python.d/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
index 72b23ab1c..fcc30118c 100644
--- a/python.d/python_modules/urllib3/contrib/securetransport.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
"""
SecureTransport support for urllib3 via ctypes.
diff --git a/python.d/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
index 39e92fde1..1cb79285b 100644
--- a/python.d/python_modules/urllib3/contrib/socks.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
diff --git a/python.d/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
index 6c4be5810..a71cabe06 100644
--- a/python.d/python_modules/urllib3/exceptions.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from .packages.six.moves.http_client import (
IncompleteRead as httplib_IncompleteRead
diff --git a/python.d/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py
index 19b0ae0c8..de7577b74 100644
--- a/python.d/python_modules/urllib3/fields.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/fields.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import email.utils
import mimetypes
diff --git a/python.d/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
index cd11cee46..3febc9cfe 100644
--- a/python.d/python_modules/urllib3/filepost.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import codecs
diff --git a/python.d/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
index 170e974c1..170e974c1 100644
--- a/python.d/python_modules/urllib3/packages/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
diff --git a/python.d/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
index 75b80dcf8..8ab122f8b 100644
--- a/python.d/python_modules/urllib3/packages/backports/makefile.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
"""
backports.makefile
~~~~~~~~~~~~~~~~~~
diff --git a/python.d/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
index 4479363cc..9f7c0e6b8 100644
--- a/python.d/python_modules/urllib3/packages/ordered_dict.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
@@ -2,6 +2,7 @@
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
+# SPDX-License-Identifier: MIT
try:
from thread import get_ident as _get_ident
except ImportError:
diff --git a/python.d/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
index 190c0239c..31df5012b 100644
--- a/python.d/python_modules/urllib3/packages/six.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
@@ -2,23 +2,7 @@
# Copyright (c) 2010-2015 Benjamin Peterson
#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
index d6594eb26..2aeeeff91 100644
--- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
import sys
try:
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
index 1fd42f38a..647e081da 100644
--- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
@@ -1,7 +1,6 @@
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
-# Note: This file is under the PSF license as the code comes from the python
-# stdlib. http://docs.python.org/3/license.html
+# SPDX-License-Identifier: Python-2.0
import re
import sys
diff --git a/python.d/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
index 4ae91744d..adea9bc01 100644
--- a/python.d/python_modules/urllib3/poolmanager.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import collections
import functools
diff --git a/python.d/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py
index c0fddff04..f78331975 100644
--- a/python.d/python_modules/urllib3/request.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/request.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from .filepost import encode_multipart_formdata
diff --git a/python.d/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py
index 408d9996a..cf14a3076 100644
--- a/python.d/python_modules/urllib3/response.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/response.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from contextlib import contextmanager
import zlib
diff --git a/python.d/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
index 2f2770b62..bba628d98 100644
--- a/python.d/python_modules/urllib3/util/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
diff --git a/python.d/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
index bf699cfd0..3bd69e8fa 100644
--- a/python.d/python_modules/urllib3/util/connection.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import socket
from .wait import wait_for_read
diff --git a/python.d/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
index 3ddfcd559..18f27b032 100644
--- a/python.d/python_modules/urllib3/util/request.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from base64 import b64encode
diff --git a/python.d/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
index 67cf730ab..e4cda93d4 100644
--- a/python.d/python_modules/urllib3/util/response.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from ..packages.six.moves import http_client as httplib
diff --git a/python.d/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
index c603cb490..61e63afec 100644
--- a/python.d/python_modules/urllib3/util/retry.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import time
import logging
diff --git a/python.d/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
index d75cb266b..c0997b1a2 100644
--- a/python.d/python_modules/urllib3/util/selectors.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
diff --git a/python.d/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
index 33d428ed8..ece3ec39e 100644
--- a/python.d/python_modules/urllib3/util/ssl_.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import errno
import warnings
diff --git a/python.d/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
index cec817e6e..4041cf9b9 100644
--- a/python.d/python_modules/urllib3/util/timeout.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
diff --git a/python.d/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
index 6b6f9968d..99fd6534a 100644
--- a/python.d/python_modules/urllib3/util/url.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from collections import namedtuple
diff --git a/python.d/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
index cb396e508..21e72979c 100644
--- a/python.d/python_modules/urllib3/util/wait.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
from .selectors import (
HAS_SELECT,
DefaultSelector,
diff --git a/collectors/python.d.plugin/rabbitmq/Makefile.inc b/collectors/python.d.plugin/rabbitmq/Makefile.inc
new file mode 100644
index 000000000..7e67ef512
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rabbitmq/rabbitmq.chart.py
+dist_pythonconfig_DATA += rabbitmq/rabbitmq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rabbitmq/README.md rabbitmq/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
new file mode 100644
index 000000000..22d367c4d
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -0,0 +1,56 @@
+# rabbitmq
+
+This module monitors RabbitMQ performance and health metrics.
+
+The following charts are drawn:
+
+1. **Queued Messages**
+ * ready
+ * unacknowledged
+
+2. **Message Rates**
+ * ack
+ * redelivered
+ * deliver
+ * publish
+
+3. **Global Counts**
+ * channels
+ * consumers
+ * connections
+ * queues
+ * exchanges
+
+4. **File Descriptors**
+ * used descriptors
+
+5. **Socket Descriptors**
+ * used descriptors
+
+6. **Erlang processes**
+ * used processes
+
+7. **Erlang run queue**
+ * Erlang run queue
+
+8. **Memory**
+ * free memory in megabytes
+
+9. **Disk Space**
+ * free disk space in gigabytes
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 15672
+ user : 'guest'
+ pass : 'guest'
+
+```
+
+When no configuration file is found, the module tries to connect to `localhost:15672`.
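+
+To verify that the management API is reachable with the same credentials the
+job uses (a quick sketch; it assumes the default `guest`/`guest` account and
+the management plugin listening on `localhost:15672`):
+
+```python
+import base64
+import json
+try:
+    from urllib.request import Request, urlopen  # Python 3
+except ImportError:
+    from urllib2 import Request, urlopen  # Python 2
+
+auth = base64.b64encode(b'guest:guest').decode('ascii')
+req = Request('http://localhost:15672/api/overview')
+req.add_header('Authorization', 'Basic ' + auth)
+overview = json.loads(urlopen(req).read().decode('utf-8'))
+print(overview.get('rabbitmq_version'))
+```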
+
+---
diff --git a/python.d/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
index b8847e9f8..8298b4032 100644
--- a/python.d/rabbitmq.chart.py
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: rabbitmq netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
from collections import namedtuple
from json import loads
@@ -20,91 +21,104 @@ retries = 60
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
-NODE_STATS = ['fd_used',
- 'mem_used',
- 'sockets_used',
- 'proc_used',
- 'disk_free',
- 'run_queue'
- ]
-OVERVIEW_STATS = ['object_totals.channels',
- 'object_totals.consumers',
- 'object_totals.connections',
- 'object_totals.queues',
- 'object_totals.exchanges',
- 'queue_totals.messages_ready',
- 'queue_totals.messages_unacknowledged',
- 'message_stats.ack',
- 'message_stats.redeliver',
- 'message_stats.deliver',
- 'message_stats.publish'
- ]
-ORDER = ['queued_messages', 'message_rates', 'global_counts',
- 'file_descriptors', 'socket_descriptors', 'erlang_processes', 'erlang_run_queue', 'memory', 'disk_space']
+NODE_STATS = [
+ 'fd_used',
+ 'mem_used',
+ 'sockets_used',
+ 'proc_used',
+ 'disk_free',
+ 'run_queue'
+]
+
+OVERVIEW_STATS = [
+ 'object_totals.channels',
+ 'object_totals.consumers',
+ 'object_totals.connections',
+ 'object_totals.queues',
+ 'object_totals.exchanges',
+ 'queue_totals.messages_ready',
+ 'queue_totals.messages_unacknowledged',
+ 'message_stats.ack',
+ 'message_stats.redeliver',
+ 'message_stats.deliver',
+ 'message_stats.publish'
+]
+
+ORDER = [
+ 'queued_messages',
+ 'message_rates',
+ 'global_counts',
+ 'file_descriptors',
+ 'socket_descriptors',
+ 'erlang_processes',
+ 'erlang_run_queue',
+ 'memory',
+ 'disk_space'
+]
CHARTS = {
'file_descriptors': {
- 'options': [None, 'File Descriptors', 'descriptors', 'overview',
- 'rabbitmq.file_descriptors', 'line'],
+ 'options': [None, 'File Descriptors', 'descriptors', 'overview', 'rabbitmq.file_descriptors', 'line'],
'lines': [
['fd_used', 'used', 'absolute']
- ]},
+ ]
+ },
'memory': {
- 'options': [None, 'Memory', 'MB', 'overview',
- 'rabbitmq.memory', 'line'],
+ 'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'line'],
'lines': [
['mem_used', 'used', 'absolute', 1, 1024 << 10]
- ]},
+ ]
+ },
'disk_space': {
- 'options': [None, 'Disk Space', 'GB', 'overview',
- 'rabbitmq.disk_space', 'line'],
+ 'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'line'],
'lines': [
['disk_free', 'free', 'absolute', 1, 1024 ** 3]
- ]},
+ ]
+ },
'socket_descriptors': {
- 'options': [None, 'Socket Descriptors', 'descriptors', 'overview',
- 'rabbitmq.sockets', 'line'],
+ 'options': [None, 'Socket Descriptors', 'descriptors', 'overview', 'rabbitmq.sockets', 'line'],
'lines': [
['sockets_used', 'used', 'absolute']
- ]},
+ ]
+ },
'erlang_processes': {
- 'options': [None, 'Erlang Processes', 'processes', 'overview',
- 'rabbitmq.processes', 'line'],
+ 'options': [None, 'Erlang Processes', 'processes', 'overview', 'rabbitmq.processes', 'line'],
'lines': [
['proc_used', 'used', 'absolute']
- ]},
+ ]
+ },
'erlang_run_queue': {
- 'options': [None, 'Erlang Run Queue', 'processes', 'overview',
- 'rabbitmq.erlang_run_queue', 'line'],
+ 'options': [None, 'Erlang Run Queue', 'processes', 'overview', 'rabbitmq.erlang_run_queue', 'line'],
'lines': [
- ['run_queue',' length', 'absolute']
- ]},
+ ['run_queue', 'length', 'absolute']
+ ]
+ },
'global_counts': {
- 'options': [None, 'Global Counts', 'counts', 'overview',
- 'rabbitmq.global_counts', 'line'],
+ 'options': [None, 'Global Counts', 'counts', 'overview', 'rabbitmq.global_counts', 'line'],
'lines': [
['object_totals_channels', 'channels', 'absolute'],
['object_totals_consumers', 'consumers', 'absolute'],
['object_totals_connections', 'connections', 'absolute'],
['object_totals_queues', 'queues', 'absolute'],
['object_totals_exchanges', 'exchanges', 'absolute']
- ]},
+ ]
+ },
'queued_messages': {
- 'options': [None, 'Queued Messages', 'messages', 'overview',
- 'rabbitmq.queued_messages', 'stacked'],
+ 'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'],
'lines': [
['queue_totals_messages_ready', 'ready', 'absolute'],
['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute']
- ]},
+ ]
+ },
'message_rates': {
- 'options': [None, 'Message Rates', 'messages/s', 'overview',
- 'rabbitmq.message_rates', 'stacked'],
+ 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'stacked'],
'lines': [
['message_stats_ack', 'ack', 'incremental'],
['message_stats_redeliver', 'redeliver', 'incremental'],
['message_stats_deliver', 'deliver', 'incremental'],
['message_stats_publish', 'publish', 'incremental']
- ]}
+ ]
+ }
}
diff --git a/conf.d/python.d/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
index 3f90da8a2..3f90da8a2 100644
--- a/conf.d/python.d/rabbitmq.conf
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
diff --git a/collectors/python.d.plugin/redis/Makefile.inc b/collectors/python.d.plugin/redis/Makefile.inc
new file mode 100644
index 000000000..6aab08977
--- /dev/null
+++ b/collectors/python.d.plugin/redis/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += redis/redis.chart.py
+dist_pythonconfig_DATA += redis/redis.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += redis/README.md redis/Makefile.inc
+
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
new file mode 100644
index 000000000..8d21df0ca
--- /dev/null
+++ b/collectors/python.d.plugin/redis/README.md
@@ -0,0 +1,42 @@
+# redis
+
+Get INFO data from a redis instance.
+
+The following charts are drawn:
+
+1. **Operations** per second
+ * operations
+
+2. **Hit rate** in percent
+ * rate
+
+3. **Memory utilization** in kilobytes
+ * total
+ * lua
+
+4. **Database keys**
+   * lines are created dynamically based on how many databases there are
+
+5. **Clients**
+ * connected
+ * blocked
+
+6. **Slaves**
+ * connected
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 6379
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:6379`.
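+
+To quickly verify that redis answers INFO on the default TCP port (a sketch;
+it assumes an unauthenticated instance listening on `localhost:6379`):
+
+```python
+import socket
+
+conn = socket.create_connection(('localhost', 6379), timeout=2)
+conn.sendall(b'INFO\r\n')
+reply = conn.recv(4096).decode('utf-8', 'replace')
+conn.close()
+print(reply.splitlines()[0])  # e.g. '$3764' - the RESP bulk string length
+```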
+
+---
diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
new file mode 100644
index 000000000..37d55ebfe
--- /dev/null
+++ b/collectors/python.d.plugin/redis/redis.chart.py
@@ -0,0 +1,261 @@
+# -*- coding: utf-8 -*-
+# Description: redis netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+REDIS_ORDER = [
+ 'operations',
+ 'hit_rate',
+ 'memory',
+ 'keys_redis',
+ 'eviction',
+ 'net',
+ 'connections',
+ 'clients',
+ 'slaves',
+ 'persistence',
+ 'bgsave_now',
+ 'bgsave_health',
+ 'uptime',
+]
+
+PIKA_ORDER = [
+ 'operations',
+ 'hit_rate',
+ 'memory',
+ 'keys_pika',
+ 'connections',
+ 'clients',
+ 'slaves',
+ 'uptime',
+]
+
+
+CHARTS = {
+ 'operations': {
+ 'options': [None, 'Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
+ 'lines': [
+ ['total_commands_processed', 'commands', 'incremental'],
+ ['instantaneous_ops_per_sec', 'operations', 'absolute']
+ ]
+ },
+ 'hit_rate': {
+ 'options': [None, 'Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'],
+ 'lines': [
+ ['hit_rate', 'rate', 'absolute']
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'],
+ 'lines': [
+ ['used_memory', 'total', 'absolute', 1, 1024],
+ ['used_memory_lua', 'lua', 'absolute', 1, 1024]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
+ 'lines': [
+ ['total_net_input_bytes', 'in', 'incremental', 8, 1024],
+ ['total_net_output_bytes', 'out', 'incremental', -8, 1024]
+ ]
+ },
+ 'keys_redis': {
+ 'options': [None, 'Keys per Database', 'keys', 'keys', 'redis.keys', 'line'],
+ 'lines': []
+ },
+ 'keys_pika': {
+ 'options': [None, 'Keys', 'keys', 'keys', 'redis.keys', 'line'],
+ 'lines': [
+ ['kv_keys', 'kv', 'absolute'],
+ ['hash_keys', 'hash', 'absolute'],
+ ['list_keys', 'list', 'absolute'],
+ ['zset_keys', 'zset', 'absolute'],
+ ['set_keys', 'set', 'absolute']
+ ]
+ },
+ 'eviction': {
+ 'options': [None, 'Evicted Keys', 'keys', 'keys', 'redis.eviction', 'line'],
+ 'lines': [
+ ['evicted_keys', 'evicted', 'absolute']
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'redis.connections', 'line'],
+ 'lines': [
+ ['total_connections_received', 'received', 'incremental', 1],
+ ['rejected_connections', 'rejected', 'incremental', -1]
+ ]
+ },
+ 'clients': {
+ 'options': [None, 'Clients', 'clients', 'connections', 'redis.clients', 'line'],
+ 'lines': [
+ ['connected_clients', 'connected', 'absolute', 1],
+ ['blocked_clients', 'blocked', 'absolute', -1]
+ ]
+ },
+ 'slaves': {
+ 'options': [None, 'Slaves', 'slaves', 'replication', 'redis.slaves', 'line'],
+ 'lines': [
+ ['connected_slaves', 'connected', 'absolute']
+ ]
+ },
+ 'persistence': {
+ 'options': [None, 'Persistence Changes Since Last Save', 'changes', 'persistence',
+ 'redis.rdb_changes', 'line'],
+ 'lines': [
+ ['rdb_changes_since_last_save', 'changes', 'absolute']
+ ]
+ },
+ 'bgsave_now': {
+ 'options': [None, 'Duration of the RDB Save Operation', 'seconds', 'persistence',
+ 'redis.bgsave_now', 'absolute'],
+ 'lines': [
+ ['rdb_bgsave_in_progress', 'rdb save', 'absolute']
+ ]
+ },
+ 'bgsave_health': {
+ 'options': [None, 'Status of the Last RDB Save Operation', 'status', 'persistence',
+ 'redis.bgsave_health', 'line'],
+ 'lines': [
+ ['rdb_last_bgsave_status', 'rdb save', 'absolute']
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'redis.uptime', 'line'],
+ 'lines': [
+ ['uptime_in_seconds', 'uptime', 'absolute']
+ ]
+ }
+}
+
+
+def copy_chart(name):
+ return {name: deepcopy(CHARTS[name])}
+
+
+RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self._keep_alive = True
+
+ self.order = list()
+ self.definitions = dict()
+
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 6379)
+ self.unix_socket = self.configuration.get('socket')
+ p = self.configuration.get('pass')
+
+ self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
+ self.request = 'INFO\r\n'.encode()
+ self.bgsave_time = 0
+
+ def do_auth(self):
+ resp = self._get_raw_data(request=self.auth_request)
+ if not resp:
+ return False
+ if resp.strip() != '+OK':
+ self.error('invalid password')
+ return False
+ return True
+
+ def get_raw_and_parse(self):
+ if self.auth_request and not self.do_auth():
+ return None
+
+ resp = self._get_raw_data()
+
+ if not resp:
+ return None
+
+ parsed = RE.findall(resp)
+
+ if not parsed:
+ self.error('response is invalid/empty')
+ return None
+
+ return dict((k.replace(' ', '_'), v) for k, v in parsed)
+
+ def get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ data = self.get_raw_and_parse()
+
+ if not data:
+ return None
+
+ try:
+ data['hit_rate'] = (
+ (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits']) + int(data['keyspace_misses']))
+ )
+ except (KeyError, ZeroDivisionError):
+ data['hit_rate'] = 0
+
+ if data.get('redis_version') and data.get('rdb_bgsave_in_progress'):
+ self.get_data_redis_specific(data)
+
+ return data
+
+ def get_data_redis_specific(self, data):
+ if data['rdb_bgsave_in_progress'] != '0':
+ self.bgsave_time += self.update_every
+ else:
+ self.bgsave_time = 0
+
+ data['rdb_last_bgsave_status'] = 0 if data['rdb_last_bgsave_status'] == 'ok' else 1
+ data['rdb_bgsave_in_progress'] = self.bgsave_time
+
+ def check(self):
+ """
+ Parse configuration, check if redis is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ data = self.get_raw_and_parse()
+
+ if not data:
+ return False
+
+ self.order = PIKA_ORDER if data.get('pika_version') else REDIS_ORDER
+
+ for n in self.order:
+ self.definitions.update(copy_chart(n))
+
+ if data.get('redis_version'):
+ for k in data:
+ if k.startswith('db'):
+ self.definitions['keys_redis']['lines'].append([k, None, 'absolute'])
+
+ return True
+
+ def _check_raw_data(self, data):
+ """
+ Check if all data has been gathered from socket.
+ Parse first line containing message length and check against received message
+ :param data: str
+ :return: boolean
+ """
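+        # A redis INFO reply is a RESP bulk string: '$<length>\r\n<payload>\r\n'.
+        # The first line therefore carries the expected payload length.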
+ length = len(data)
+ supposed = data.split('\n')[0][1:-1]
+        offset = len(supposed) + 4  # 1 '$' sign, the '\r\n' after the length, and 1 byte of the ending '\r\n'
+ if not supposed.isdigit():
+ return True
+ supposed = int(supposed)
+
+ if length - offset >= supposed:
+ self.debug('received full response from redis')
+ return True
+
+ self.debug('waiting more data from redis')
+ return False
diff --git a/conf.d/python.d/redis.conf b/collectors/python.d.plugin/redis/redis.conf
index 6363f6da7..6363f6da7 100644
--- a/conf.d/python.d/redis.conf
+++ b/collectors/python.d.plugin/redis/redis.conf
diff --git a/collectors/python.d.plugin/rethinkdbs/Makefile.inc b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
new file mode 100644
index 000000000..dec604464
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rethinkdbs/rethinkdbs.chart.py
+dist_pythonconfig_DATA += rethinkdbs/rethinkdbs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rethinkdbs/README.md rethinkdbs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
new file mode 100644
index 000000000..5d357fa49
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -0,0 +1,34 @@
+# rethinkdbs
+
+This module monitors RethinkDB health metrics.
+
+The following charts are drawn:
+
+1. **Connected Servers**
+ * connected
+ * missing
+
+2. **Active Clients**
+ * active
+
+3. **Queries** per second
+ * queries
+
+4. **Documents** per second
+ * documents
+
+### configuration
+
+```yaml
+
+localhost:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 28015
+ user : "user"
+ password : "pass"
+```
+
+When no configuration file is found, the module tries to connect to `127.0.0.1:28015`.
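+
+To verify connectivity with the same parameters the job uses (a sketch; it
+requires the `rethinkdb` python driver, and the credentials are placeholders):
+
+```python
+import rethinkdb as rdb
+
+conn = rdb.connect(host='127.0.0.1', port=28015, user='admin', password='pass')
+stats = list(rdb.db('rethinkdb').table('stats').run(conn))
+print(stats[0]['id'])  # the module treats the first document as the cluster-wide one
+```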
+
+---
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
new file mode 100644
index 000000000..127e9ad4b
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -0,0 +1,235 @@
+# -*- coding: utf-8 -*-
+# Description: rethinkdb netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import rethinkdb as rdb
+ HAS_RETHINKDB = True
+except ImportError:
+ HAS_RETHINKDB = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+ 'cluster_connected_servers',
+ 'cluster_clients_active',
+ 'cluster_queries',
+ 'cluster_documents',
+]
+
+
+def cluster_charts():
+ return {
+ 'cluster_connected_servers': {
+ 'options': [None, 'Connected Servers', 'servers', 'cluster', 'rethinkdb.cluster_connected_servers',
+ 'stacked'],
+ 'lines': [
+ ['cluster_servers_connected', 'connected'],
+ ['cluster_servers_missing', 'missing'],
+ ]
+ },
+ 'cluster_clients_active': {
+ 'options': [None, 'Active Clients', 'clients', 'cluster', 'rethinkdb.cluster_clients_active',
+ 'line'],
+ 'lines': [
+ ['cluster_clients_active', 'active'],
+ ]
+ },
+ 'cluster_queries': {
+ 'options': [None, 'Queries', 'queries/s', 'cluster', 'rethinkdb.cluster_queries', 'line'],
+ 'lines': [
+ ['cluster_queries_per_sec', 'queries'],
+ ]
+ },
+ 'cluster_documents': {
+ 'options': [None, 'Documents', 'documents/s', 'cluster', 'rethinkdb.cluster_documents', 'line'],
+ 'lines': [
+ ['cluster_read_docs_per_sec', 'reads'],
+ ['cluster_written_docs_per_sec', 'writes'],
+ ]
+ },
+ }
+
+
+def server_charts(n):
+ o = [
+ '{0}_client_connections'.format(n),
+ '{0}_clients_active'.format(n),
+ '{0}_queries'.format(n),
+ '{0}_documents'.format(n),
+ ]
+ f = 'server {0}'.format(n)
+
+ c = {
+ o[0]: {
+ 'options': [None, 'Client Connections', 'connections', f, 'rethinkdb.client_connections', 'line'],
+ 'lines': [
+ ['{0}_client_connections'.format(n), 'connections'],
+ ]
+ },
+ o[1]: {
+ 'options': [None, 'Active Clients', 'clients', f, 'rethinkdb.clients_active', 'line'],
+ 'lines': [
+ ['{0}_clients_active'.format(n), 'active'],
+ ]
+ },
+ o[2]: {
+ 'options': [None, 'Queries', 'queries/s', f, 'rethinkdb.queries', 'line'],
+ 'lines': [
+ ['{0}_queries_total'.format(n), 'queries', 'incremental'],
+ ]
+ },
+ o[3]: {
+ 'options': [None, 'Documents', 'documents/s', f, 'rethinkdb.documents', 'line'],
+ 'lines': [
+ ['{0}_read_docs_total'.format(n), 'reads', 'incremental'],
+ ['{0}_written_docs_total'.format(n), 'writes', 'incremental'],
+ ]
+ },
+ }
+
+ return o, c
+
+
+class Cluster:
+ def __init__(self, raw):
+ self.raw = raw
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ return {
+ 'cluster_clients_active': qe['clients_active'],
+ 'cluster_queries_per_sec': qe['queries_per_sec'],
+ 'cluster_read_docs_per_sec': qe['read_docs_per_sec'],
+ 'cluster_written_docs_per_sec': qe['written_docs_per_sec'],
+ 'cluster_servers_connected': 0,
+ 'cluster_servers_missing': 0,
+ }
+
+
+class Server:
+ def __init__(self, raw):
+ self.name = raw['server']
+ self.raw = raw
+
+ def error(self):
+ return self.raw.get('error')
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ d = {
+ 'client_connections': qe['client_connections'],
+ 'clients_active': qe['clients_active'],
+ 'queries_total': qe['queries_total'],
+ 'read_docs_total': qe['read_docs_total'],
+ 'written_docs_total': qe['written_docs_total'],
+ }
+
+ return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = cluster_charts()
+
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 28015)
+ self.user = self.configuration.get('user', 'admin')
+ self.password = self.configuration.get('password')
+ self.timeout = self.configuration.get('timeout', 2)
+
+ self.conn = None
+ self.alive = True
+
+ def check(self):
+ if not HAS_RETHINKDB:
+ self.error('"rethinkdb" module is needed to use rethinkdbs.py')
+ return False
+
+ if not self.connect():
+            return False
+
+ stats = self.get_stats()
+
+ if not stats:
+            return False
+
+ for v in stats[1:]:
+ if get_id(v) == 'server':
+ o, c = server_charts(v['server'])
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def get_data(self):
+ if not self.is_alive():
+ return None
+
+ stats = self.get_stats()
+
+ if not stats:
+ return None
+
+ data = dict()
+
+ # cluster
+ data.update(Cluster(stats[0]).data())
+
+ # servers
+ for v in stats[1:]:
+ if get_id(v) != 'server':
+ continue
+
+ s = Server(v)
+
+ if s.error():
+ data['cluster_servers_missing'] += 1
+ else:
+ data['cluster_servers_connected'] += 1
+ data.update(s.data())
+
+ return data
+
+ def get_stats(self):
+ try:
+ return list(rdb.db('rethinkdb').table('stats').run(self.conn).items)
+ except rdb.errors.ReqlError:
+ self.alive = False
+ return None
+
+ def connect(self):
+ try:
+ self.conn = rdb.connect(
+ host=self.host,
+ port=self.port,
+ user=self.user,
+ password=self.password,
+ timeout=self.timeout,
+ )
+ self.alive = True
+ return True
+ except rdb.errors.ReqlError as error:
+ self.error('Connection to {0}:{1} failed: {2}'.format(self.host, self.port, error))
+ return False
+
+ def reconnect(self):
+ # The connection is already closed after rdb.errors.ReqlError,
+ # so we do not need to call conn.close()
+        return self.connect()
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+
+def get_id(v):
+ return v['id'][0]
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
new file mode 100644
index 000000000..73544fc2e
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
@@ -0,0 +1,78 @@
+# netdata python.d.plugin configuration for rethinkdb
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, rethinkdb also supports the following:
+#
+# host: IP or HOSTNAME # default is 'localhost'
+# port: PORT # default is 28015
+# user: USERNAME # default is 'admin'
+# password: PASSWORD # not set by default
+# timeout: TIMEOUT # default is 2
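+#
+# For example, a second job for a remote server might look like this
+# (illustrative values only):
+#
+# remote:
+#   name     : 'remote'
+#   host     : '10.0.0.2'
+#   port     : 28015
+#   user     : 'admin'
+#   password : 'secret'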
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ name: 'local'
+ host: 'localhost'
diff --git a/collectors/python.d.plugin/retroshare/Makefile.inc b/collectors/python.d.plugin/retroshare/Makefile.inc
new file mode 100644
index 000000000..891193e6d
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += retroshare/retroshare.chart.py
+dist_pythonconfig_DATA += retroshare/retroshare.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += retroshare/README.md retroshare/Makefile.inc
+
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
new file mode 100644
index 000000000..e95095c65
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -0,0 +1 @@
+# retroshare
diff --git a/python.d/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
index 8c0330ec6..1d8e35050 100644
--- a/python.d/retroshare.chart.py
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: RetroShare netdata python.d module
# Authors: sehraf
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
@@ -20,19 +21,22 @@ CHARTS = {
'lines': [
['bandwidth_up_kb', 'Upload'],
['bandwidth_down_kb', 'Download']
- ]},
+ ]
+ },
'peers': {
'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
'lines': [
['peers_all', 'All friends'],
['peers_connected', 'Connected friends']
- ]},
+ ]
+ },
'dht': {
'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
'lines': [
['dht_size_all', 'DHT nodes estimated'],
['dht_size_rs', 'RS nodes estimated']
- ]}
+ ]
+ }
}
diff --git a/conf.d/python.d/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf
index 9c92583f7..9c92583f7 100644
--- a/conf.d/python.d/retroshare.conf
+++ b/collectors/python.d.plugin/retroshare/retroshare.conf
diff --git a/collectors/python.d.plugin/samba/Makefile.inc b/collectors/python.d.plugin/samba/Makefile.inc
new file mode 100644
index 000000000..230a8ba43
--- /dev/null
+++ b/collectors/python.d.plugin/samba/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += samba/samba.chart.py
+dist_pythonconfig_DATA += samba/samba.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += samba/README.md samba/Makefile.inc
+
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
new file mode 100644
index 000000000..44610d373
--- /dev/null
+++ b/collectors/python.d.plugin/samba/README.md
@@ -0,0 +1,67 @@
+# samba
+
+Performance metrics of Samba file sharing.
+
+**Requirements:**
+* `smbstatus` program
+* `sudo` program
+* `smbd` must be compiled with profiling enabled
+* `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
+* `netdata` user needs to be able to sudo the `smbstatus` program without a password
+
+It produces the following charts:
+
+1. **Syscall R/Ws** in kilobytes/s
+ * sendfile
+   * recvfile
+
+2. **Smb2 R/Ws** in kilobytes/s
+ * readout
+ * writein
+ * readin
+ * writeout
+
+3. **Smb2 Create/Close** in operations/s
+ * create
+ * close
+
+4. **Smb2 Info** in operations/s
+ * getinfo
+ * setinfo
+
+5. **Smb2 Find** in operations/s
+ * find
+
+6. **Smb2 Notify** in operations/s
+ * notify
+
+7. **Smb2 Lesser Ops** as counters
+ * tcon
+ * negprot
+ * tdis
+ * cancel
+ * logoff
+ * flush
+ * lock
+ * keepalive
+ * break
+ * sessetup
+
+### prerequisite
+This module uses `smbstatus` which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `smbstatus` as root without a password.
+
+Add to `sudoers`:
+
+ netdata ALL=(root) NOPASSWD: /path/to/smbstatus
+
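+You can verify the entry as the `netdata` user with `sudo -n /path/to/smbstatus -P`
+(substituting the real path); it should print the profiling counters without
+prompting for a password.
+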
+### configuration
+
+**samba** is disabled by default. It must be explicitly enabled in `python.d.conf`:
+
+```yaml
+samba: yes
+```
+
+---
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
new file mode 100644
index 000000000..b2278de9e
--- /dev/null
+++ b/collectors/python.d.plugin/samba/samba.chart.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+# Description: samba netdata python.d module
+# Author: Christopher Cox <chris_cox@endlessnow.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# The netdata user needs to be able to sudo the smbstatus program
+# without a password:
+# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+#
+# This makes calls to smbstatus -P
+#
+# This just looks at a couple of values out of syscall, and some from smb2.
+#
+# The Lesser Ops chart is merely a display of current counter values. They
+# didn't seem to change much to me. However, if you notice something changing
+# a lot there, bring one or more out into its own chart and make it incremental
+# (like find and notify... good examples).
+
+import re
+
+from bases.collection import find_binary
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+
+disabled_by_default = True
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+retries = 60
+
+ORDER = [
+ 'syscall_rw',
+ 'smb2_rw',
+ 'smb2_create_close',
+ 'smb2_info',
+ 'smb2_find',
+ 'smb2_notify',
+ 'smb2_sm_count'
+]
+
+CHARTS = {
+ 'syscall_rw': {
+ 'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area'],
+ 'lines': [
+ ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
+ ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_rw': {
+ 'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area'],
+ 'lines': [
+ ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
+ ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
+ ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
+ ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_create_close': {
+ 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line'],
+ 'lines': [
+ ['smb2_create_count', 'create', 'incremental', 1, 1],
+ ['smb2_close_count', 'close', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_info': {
+ 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line'],
+ 'lines': [
+ ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
+ ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_find': {
+ 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line'],
+ 'lines': [
+ ['smb2_find_count', 'find', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_notify': {
+ 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line'],
+ 'lines': [
+ ['smb2_notify_count', 'notify', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_sm_count': {
+ 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'],
+ 'lines': [
+ ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
+ ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
+ ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
+ ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
+ ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
+ ['smb2_flush_count', 'flush', 'absolute', 1, 1],
+ ['smb2_lock_count', 'lock', 'absolute', 1, 1],
+ ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
+ ['smb2_break_count', 'break', 'absolute', 1, 1],
+ ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
+
+ def check(self):
+ sudo_binary, smbstatus_binary = find_binary('sudo'), find_binary('smbstatus')
+
+ if not (sudo_binary and smbstatus_binary):
+ self.error("Can\'t locate 'sudo' or 'smbstatus' binary")
+ return False
+
+ self.command = [sudo_binary, '-v']
+ err = self._get_raw_data(stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+
+ self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
+
+ return ExecutableService.check(self)
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ parsed = self.rgx_smb2.findall(' '.join(raw_data))
+
+ return dict(parsed) or None
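The parser above is essentially the single regular expression `rgx_smb2`; here is a standalone sketch of how it turns `smbstatus -P` counter lines into the flat dict the charts consume (the sample lines are invented and heavily trimmed):

```python
# Standalone sketch of the parsing in _get_data(); sample lines are
# invented and much shorter than real `smbstatus -P` output.
import re

RGX_SMB2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')

sample_lines = [
    'smb2_read_outbytes:     123456',
    'smb2_write_inbytes:     7890',
    'smb2_create_count:      42',
    'syscall_sendfile_bytes: 4096',
]

# findall() yields (counter, value) pairs; dict() flattens them.
# Values stay strings here; netdata coerces them downstream.
data = dict(RGX_SMB2.findall(' '.join(sample_lines)))
print(data['smb2_create_count'], data['syscall_sendfile_bytes'])  # 42 4096
```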
diff --git a/conf.d/python.d/samba.conf b/collectors/python.d.plugin/samba/samba.conf
index ee513c60f..ee513c60f 100644
--- a/conf.d/python.d/samba.conf
+++ b/collectors/python.d.plugin/samba/samba.conf
diff --git a/collectors/python.d.plugin/sensors/Makefile.inc b/collectors/python.d.plugin/sensors/Makefile.inc
new file mode 100644
index 000000000..5fb26e1c8
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += sensors/sensors.chart.py
+dist_pythonconfig_DATA += sensors/sensors.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
+
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
new file mode 100644
index 000000000..eb1642d90
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -0,0 +1,17 @@
+# sensors
+
+System sensors information.
+
+Charts are created dynamically.
+
+### configuration
+
+For detailed configuration information please read [`sensors.conf`](sensors.conf) file.
+
+### possible issues
+
+There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
+We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
+Please join this discussion for help.
+
+---
diff --git a/python.d/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
index 06e420b68..69d2bfe99 100644
--- a/python.d/sensors.chart.py
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: sensors netdata python.d plugin
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.SimpleService import SimpleService
from third_party import lm_sensors as sensors
@@ -16,37 +17,44 @@ CHARTS = {
'options': [None, ' temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'voltage': {
'options': [None, ' voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'current': {
'options': [None, ' current', 'Ampere', 'current', 'sensors.current', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'power': {
'options': [None, ' power', 'Watt', 'power', 'sensors.power', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000000]
- ]},
+ ]
+ },
'fan': {
'options': [None, ' fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]},
+ ]
+ },
'energy': {
'options': [None, ' energy', 'Joule', 'energy', 'sensors.energy', 'areastack'],
'lines': [
[None, None, 'incremental', 1, 1000000]
- ]},
+ ]
+ },
'humidity': {
'options': [None, ' humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'],
'lines': [
[None, None, 'absolute', 1, 1000]
- ]}
+ ]
+ }
}
LIMITS = {
@@ -94,7 +102,7 @@ class Service(SimpleService):
limit = LIMITS[type_name]
if val < limit[0] or val > limit[1]:
continue
- data[prefix + "_" + str(feature.name.decode())] = int(val * 1000)
+ data[prefix + '_' + str(feature.name.decode())] = int(val * 1000)
except Exception as error:
self.error(error)
return None
@@ -114,7 +122,7 @@ class Service(SimpleService):
continue
if TYPE_MAP[feature.type] == sensor:
# create chart
- name = chip_name + "_" + TYPE_MAP[feature.type]
+ name = chip_name + '_' + TYPE_MAP[feature.type]
if name not in self.order:
self.order.append(name)
chart_def = list(CHARTS[sensor]['options'])
@@ -122,7 +130,7 @@ class Service(SimpleService):
self.definitions[name] = {'options': chart_def}
self.definitions[name]['lines'] = []
line = list(CHARTS[sensor]['lines'][0])
- line[0] = chip_name + "_" + str(feature.name.decode())
+ line[0] = chip_name + '_' + str(feature.name.decode())
line[1] = sensors.get_label(chip, feature)
self.definitions[name]['lines'].append(line)
@@ -136,4 +144,3 @@ class Service(SimpleService):
self.create_definitions()
return True
-
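For context, a hedged sketch of the dynamic-definition pattern the hunks above touch: each template in `CHARTS` is copied per detected chip, and dimension ids are derived from the chip and feature names (the chip, feature, and label names below are invented):

```python
# Sketch of the per-chip chart specialization in create_definitions().
# The chip/feature/label names below are invented examples.
TEMPLATE = {
    'options': [None, ' temperature', 'Celsius', 'temperature',
                'sensors.temperature', 'line'],
    'lines': [[None, None, 'absolute', 1, 1000]],
}

def build_chart(chip_name, feature_name, label):
    options = list(TEMPLATE['options'])   # copy, do not mutate the template
    line = list(TEMPLATE['lines'][0])
    line[0] = chip_name + '_' + feature_name  # dimension id
    line[1] = label                           # label shown on the dashboard
    return {'options': options, 'lines': [line]}

chart = build_chart('coretemp-isa-0000', 'temp1', 'Core 0')
print(chart['lines'][0][:2])  # ['coretemp-isa-0000_temp1', 'Core 0']
```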
diff --git a/conf.d/python.d/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf
index 83bbffd7d..83bbffd7d 100644
--- a/conf.d/python.d/sensors.conf
+++ b/collectors/python.d.plugin/sensors/sensors.conf
diff --git a/collectors/python.d.plugin/smartd_log/Makefile.inc b/collectors/python.d.plugin/smartd_log/Makefile.inc
new file mode 100644
index 000000000..dc1d0f3fb
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += smartd_log/smartd_log.chart.py
+dist_pythonconfig_DATA += smartd_log/smartd_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += smartd_log/README.md smartd_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
new file mode 100644
index 000000000..121a63573
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -0,0 +1,38 @@
+# smartd_log
+
+This module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
+
+It produces the following charts (you can add more attributes in the module configuration file):
+
+1. **Read Error Rate** attribute 1
+
+2. **Start/Stop Count** attribute 4
+
+3. **Reallocated Sectors Count** attribute 5
+
+4. **Seek Error Rate** attribute 7
+
+5. **Power-On Hours Count** attribute 9
+
+6. **Power Cycle Count** attribute 12
+
+7. **Load/Unload Cycles** attribute 193
+
+8. **Temperature** attribute 194
+
+9. **Current Pending Sectors** attribute 197
+
+10. **Off-Line Uncorrectable** attribute 198
+
+11. **Write Error Rate** attribute 200
+
+### configuration
+
+```yaml
+local:
+ log_path : '/var/log/smartd/'
+```
+
+If no configuration is given, the module will attempt to read log files from the `/var/log/smartd/` directory.
+
+---
diff --git a/python.d/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
index 07ad88cd4..21dbccecc 100644
--- a/python.d/smartd_log.chart.py
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: smart netdata python.d module
# Author: l2isbad, vorph1
+# SPDX-License-Identifier: GPL-3.0-or-later
import os
import re
@@ -115,7 +116,7 @@ def chart_template(chart_name):
chart_name: {
'options': [None, title, units, family, 'smartd_log.' + chart_name, 'line'],
'lines': []
- }
+ }
}
@@ -184,6 +185,12 @@ class Disk:
return self.name == other.name
return self.name == other
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
@handle_os_error
def is_active(self):
return (time() - os.path.getmtime(self.log_file.path)) / 60 < self.age
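The `__ne__` and `__hash__` additions above matter because Python 3 sets `__hash__` to `None` whenever a class defines `__eq__`, which would make `Disk` objects unusable in sets and as dict keys; a minimal illustration of the pattern:

```python
# Minimal illustration: defining __eq__ without __hash__ makes a class
# unhashable in Python 3, so the module restores __hash__ explicitly.
class Disk:
    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        if isinstance(other, Disk):
            return self.name == other.name
        return self.name == other

    def __ne__(self, other):          # explicit for Python 2 compatibility
        return not self == other

    def __hash__(self):
        return hash(repr(self))

disks = {Disk('sda'), Disk('sdb')}    # works only because __hash__ exists
print(len(disks))                     # 2
print(Disk('sda') == 'sda')           # True: compares against plain names
```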
diff --git a/conf.d/python.d/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
index 3fab3f1c0..3fab3f1c0 100644
--- a/conf.d/python.d/smartd_log.conf
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf
diff --git a/collectors/python.d.plugin/spigotmc/Makefile.inc b/collectors/python.d.plugin/spigotmc/Makefile.inc
new file mode 100644
index 000000000..f9fa8b6b0
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += spigotmc/spigotmc.chart.py
+dist_pythonconfig_DATA += spigotmc/spigotmc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += spigotmc/README.md spigotmc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
new file mode 100644
index 000000000..ae5602587
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -0,0 +1,22 @@
+# spigotmc
+
+This module does some really basic monitoring for Spigot Minecraft servers.
+
+It provides two charts, one tracking server-side ticks-per-second in
+1, 5 and 15 minute averages, and one tracking the number of currently
+active users.
+
+This is not compatible with Spigot plugins which change the format of
+the data returned by the `tps` or `list` console commands.
+
+### configuration
+
+```yaml
+host: localhost
+port: 25575
+password: pass
+```
+
+By default, a connection to port 25575 on the local system is attempted with an empty password.
+
+---
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
new file mode 100644
index 000000000..a5e5ee0ee
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+# Description: spigotmc netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+import platform
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+from third_party import mcrcon
+
+# Update only every 5 seconds because collection takes in excess of
+# 100ms sometimes, and most people won't care about second-by-second data.
+update_every = 5
+
+PRECISION = 100
+
+ORDER = ['tps', 'users']
+
+CHARTS = {
+ 'tps': {
+ 'options': [None, 'Spigot Ticks Per Second', 'ticks', 'spigotmc', 'spigotmc.tps', 'line'],
+ 'lines': [
+ ['tps1', '1 Minute Average', 'absolute', 1, PRECISION],
+ ['tps5', '5 Minute Average', 'absolute', 1, PRECISION],
+ ['tps15', '15 Minute Average', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'users': {
+ 'options': [None, 'Minecraft Users', 'users', 'spigotmc', 'spigotmc.users', 'area'],
+ 'lines': [
+ ['users', 'Users', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 25575)
+ self.password = self.configuration.get('password', '')
+ self.console = mcrcon.MCRcon()
+ self.alive = True
+
+ def check(self):
+ if platform.system() != 'Linux':
+ self.error('Only supported on Linux.')
+ return False
+ try:
+ self.connect()
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+ return True
+
+ def connect(self):
+ self.console.connect(self.host, self.port, self.password)
+
+ def reconnect(self):
+ try:
+ try:
+ self.console.disconnect()
+ except mcrcon.MCRconException:
+ pass
+ self.console.connect(self.host, self.port, self.password)
+ self.alive = True
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+ return True
+
+ def is_alive(self):
+ if (not self.alive) or \
+ self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1:
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+ data = {}
+ try:
+ raw = self.console.command('tps')
+ # The above command returns a string that looks like this:
+ # '§6TPS from last 1m, 5m, 15m: §a19.99, §a19.99, §a19.99\n'
+ # The values we care about are the three numbers after the :
+ tmp = raw.split(':')[1].split(',')
+ data['tps1'] = float(tmp[0].lstrip(u' §a*')) * PRECISION
+ data['tps5'] = float(tmp[1].lstrip(u' §a*')) * PRECISION
+ data['tps15'] = float(tmp[2].lstrip(u' §a*').rstrip()) * PRECISION
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch TPS values.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+ except (TypeError, LookupError):
+ self.error('Unable to process TPS values.')
+ try:
+ raw = self.console.command('list')
+ # The above command returns a string that looks like this:
+ # 'There are 0/20 players online:'
+ # We care about the first number here.
+ data['users'] = int(raw.split()[2].split('/')[0])
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch user counts.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+ except (TypeError, LookupError):
+ self.error('Unable to process user counts.')
+ return data
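A standalone sketch of the TPS parsing performed in `_get_data()` above, using the documented output shape with invented values (chosen to be exact in binary floating point so the printed output is clean):

```python
# Sketch of the `tps` response parsing; the TPS values are invented.
PRECISION = 100

raw = u'§6TPS from last 1m, 5m, 15m: §a19.5, §a20.0, §a18.25\n'

tmp = raw.split(':')[1].split(',')
tps1 = float(tmp[0].lstrip(u' §a*')) * PRECISION
tps5 = float(tmp[1].lstrip(u' §a*')) * PRECISION
tps15 = float(tmp[2].lstrip(u' §a*').rstrip()) * PRECISION

# Scaled by PRECISION so the chart divisor restores two decimals.
print(tps1, tps5, tps15)  # 1950.0 2000.0 1825.0
```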
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
new file mode 100644
index 000000000..3ba492def
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for spigotmc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, spigotmc supports the following:
+#
+# host: localhost # The host to connect to. Defaults to the local system.
+# port: 25575 # The port the remote console is listening on.
+# password: '' # The remote console password. Must be set correctly.
diff --git a/collectors/python.d.plugin/springboot/Makefile.inc b/collectors/python.d.plugin/springboot/Makefile.inc
new file mode 100644
index 000000000..06775f937
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += springboot/springboot.chart.py
+dist_pythonconfig_DATA += springboot/springboot.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += springboot/README.md springboot/Makefile.inc
+
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
new file mode 100644
index 000000000..008436a4f
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -0,0 +1,129 @@
+# springboot
+
+This module will monitor one or more Java Spring Boot applications, depending on configuration.
+
+It produces the following charts:
+
+1. **Response Codes** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+ * others
+
+2. **Threads**
+ * daemon
+ * total
+
+3. **GC Time** in milliseconds and **GC Operations** in operations/s
+ * Copy
+ * MarkSweep
+ * ...
+
+4. **Heap Memory Usage** in KB
+ * used
+ * committed
+
+### configuration
+
+Please see the [Monitoring Java Spring Boot Applications](https://github.com/netdata/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration.
+
+---
+
+# Monitoring Java Spring Boot Applications
+
+Netdata can be used to monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in the Spring Boot library.
+
+The Spring Boot Actuator exposes these metrics over HTTP and is very easy to use:
+* add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies
+* set `endpoints.metrics.sensitive=false` in your `application.properties`
+
+You can create custom metrics by adding and injecting a `PublicMetrics` bean into your application.
+Here is an example that adds custom metrics:
+```java
+package com.example;
+
+import org.springframework.boot.actuate.endpoint.PublicMetrics;
+import org.springframework.boot.actuate.metrics.Metric;
+import org.springframework.stereotype.Service;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.util.ArrayList;
+import java.util.Collection;
+
+@Service
+public class HeapPoolMetrics implements PublicMetrics {
+
+ private static final String PREFIX = "mempool.";
+ private static final String KEY_EDEN = PREFIX + "eden";
+ private static final String KEY_SURVIVOR = PREFIX + "survivor";
+ private static final String KEY_TENURED = PREFIX + "tenured";
+
+ @Override
+ public Collection<Metric<?>> metrics() {
+ Collection<Metric<?>> result = new ArrayList<>(4);
+ for (MemoryPoolMXBean mem : ManagementFactory.getMemoryPoolMXBeans()) {
+ String poolName = mem.getName();
+ String name = null;
+ if (poolName.indexOf("Eden Space") != -1) {
+ name = KEY_EDEN;
+ } else if (poolName.indexOf("Survivor Space") != -1) {
+ name = KEY_SURVIVOR;
+ } else if (poolName.indexOf("Tenured Gen") != -1 || poolName.indexOf("Old Gen") != -1) {
+ name = KEY_TENURED;
+ }
+
+ if (name != null) {
+ result.add(newMemoryMetric(name, mem.getUsage().getMax()));
+ result.add(newMemoryMetric(name + ".init", mem.getUsage().getInit()));
+ result.add(newMemoryMetric(name + ".committed", mem.getUsage().getCommitted()));
+ result.add(newMemoryMetric(name + ".used", mem.getUsage().getUsed()));
+ }
+ }
+ return result;
+ }
+
+ private Metric<Long> newMemoryMetric(String name, long bytes) {
+ return new Metric<>(name, bytes / 1024);
+ }
+}
+```
+
+Please refer to [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.
+
+## Using netdata springboot module
+
+The springboot module is enabled by default. It looks up `http://localhost:8080/metrics` and `http://127.0.0.1:8080/metrics` to detect a Spring Boot application. You can change this by editing `/etc/netdata/python.d/springboot.conf` (to edit it on your system, run `/etc/netdata/edit-config python.d/springboot.conf`).
+
+This module defines some common charts, and you can add custom charts by changing the configuration.
+
+The configuration format looks like this:
+```yaml
+<id>:
+ name: '<name>'
+ url: '<metrics endpoint>' # ex. http://localhost:8080/metrics
+ user: '<username>' # optional
+ pass: '<password>' # optional
+ defaults:
+ [<chart-id>]: true|false
+ extras:
+ - id: '<chart-id>'
+ options:
+ title: '***'
+ units: '***'
+ family: '***'
+ context: 'springboot.***'
+ charttype: 'stacked' | 'area' | 'line'
+ lines:
+ - { dimension: 'myapp_ok', name: 'ok', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ok" metrics
+ - { dimension: 'myapp_ng', name: 'ng', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ng" metrics
+```
+
+By default, it creates `response_code`, `threads`, `gc_time`, `gc_ope` and `heap` charts.
+You can disable the default charts by setting `defaults.<chart-id>: false`.
+
+Dimension names in extra charts must replace `.` with `_` (see the short sketch after this README).
+
+Please check [springboot.conf](springboot.conf) for more examples. \ No newline at end of file
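As promised above, a small hedged sketch of the dot-to-underscore rule for extra-chart dimensions; the metric keys mirror the hypothetical `HeapPoolMetrics` example earlier in this README:

```python
# Sketch: Actuator metric keys use dots, chart dimension ids use
# underscores. Keys below mirror the Java example in this README.
metrics = {
    'mempool.eden.used': 2048,
    'mempool.survivor.used': 256,
    'mempool.tenured.used': 8192,
}

dimensions = {key.replace('.', '_'): value for key, value in metrics.items()}
print(sorted(dimensions))
# ['mempool_eden_used', 'mempool_survivor_used', 'mempool_tenured_used']
```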
diff --git a/python.d/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
index 60ad0cccb..7df37e1d0 100644
--- a/python.d/springboot.chart.py
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: tomcat netdata python.d module
# Author: Wing924
+# SPDX-License-Identifier: GPL-3.0-or-later
import json
from bases.FrameworkServices.UrlService import UrlService
@@ -17,54 +18,61 @@ DEFAULT_CHARTS = {
'response_code': {
'options': [None, "Response Codes", "requests/s", "response", "springboot.response_code", "stacked"],
'lines': [
- ["resp_other", 'Other', 'incremental'],
- ["resp_1xx", '1xx', 'incremental'],
- ["resp_2xx", '2xx', 'incremental'],
- ["resp_3xx", '3xx', 'incremental'],
- ["resp_4xx", '4xx', 'incremental'],
- ["resp_5xx", '5xx', 'incremental'],
- ]},
+ ["resp_other", 'Other', 'incremental'],
+ ["resp_1xx", '1xx', 'incremental'],
+ ["resp_2xx", '2xx', 'incremental'],
+ ["resp_3xx", '3xx', 'incremental'],
+ ["resp_4xx", '4xx', 'incremental'],
+ ["resp_5xx", '5xx', 'incremental'],
+ ]
+ },
'threads': {
'options': [None, "Threads", "current threads", "threads", "springboot.threads", "area"],
'lines': [
["threads_daemon", 'daemon', 'absolute'],
["threads", 'total', 'absolute'],
- ]},
+ ]
+ },
'gc_time': {
'options': [None, "GC Time", "milliseconds", "garbage collection", "springboot.gc_time", "stacked"],
'lines': [
- ["gc_copy_time", 'Copy', 'incremental'],
- ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
- ["gc_parnew_time", 'ParNew', 'incremental'],
- ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
- ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
- ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
- ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
- ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
- ]},
+ ["gc_copy_time", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_time", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
+ ]
+ },
'gc_ope': {
'options': [None, "GC Operations", "operations/s", "garbage collection", "springboot.gc_ope", "stacked"],
'lines': [
- ["gc_copy_count", 'Copy', 'incremental'],
- ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
- ["gc_parnew_count", 'ParNew', 'incremental'],
- ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
- ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
- ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
- ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
- ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
- ]},
+ ["gc_copy_count", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_count", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
+ ]
+ },
'heap': {
'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"],
'lines': [
["heap_committed", 'committed', "absolute"],
["heap_used", 'used', "absolute"],
- ]},
+ ]
+ }
}
+
class ExtraChartError(ValueError):
pass
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
@@ -87,11 +95,11 @@ class Service(UrlService):
return None
result = {
- 'resp_1xx': 0,
- 'resp_2xx': 0,
- 'resp_3xx': 0,
- 'resp_4xx': 0,
- 'resp_5xx': 0,
+ 'resp_1xx': 0,
+ 'resp_2xx': 0,
+ 'resp_3xx': 0,
+ 'resp_4xx': 0,
+ 'resp_5xx': 0,
'resp_other': 0,
}
@@ -121,14 +129,14 @@ class Service(UrlService):
self.order.append(extra['id'])
def _add_extra_chart(self, chart):
- chart_id = chart.get('id', None) or die('id is not defined in extra chart')
- options = chart.get('options', None) or die('option is not defined in extra chart: %s' % chart_id)
- lines = chart.get('lines', None) or die('lines is not defined in extra chart: %s' % chart_id)
-
- title = options.get('title', None) or die('title is missing: %s' % chart_id)
- units = options.get('units', None) or die('units is missing: %s' % chart_id)
- family = options.get('family', title)
- context = options.get('context', 'springboot.' + title)
+ chart_id = chart.get('id', None) or self.die('id is not defined in extra chart')
+ options = chart.get('options', None) or self.die('option is not defined in extra chart: %s' % chart_id)
+ lines = chart.get('lines', None) or self.die('lines is not defined in extra chart: %s' % chart_id)
+
+ title = options.get('title', None) or self.die('title is missing: %s' % chart_id)
+ units = options.get('units', None) or self.die('units is missing: %s' % chart_id)
+ family = options.get('family', title)
+ context = options.get('context', 'springboot.' + title)
charttype = options.get('charttype', 'line')
result = {
@@ -137,11 +145,11 @@ class Service(UrlService):
}
for line in lines:
- dimension = line.get('dimension', None) or die('dimension is missing: %s' % chart_id)
- name = line.get('name', dimension)
- algorithm = line.get('algorithm', 'absolute')
+ dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
+ name = line.get('name', dimension)
+ algorithm = line.get('algorithm', 'absolute')
multiplier = line.get('multiplier', 1)
- divisor = line.get('divisor', 1)
+ divisor = line.get('divisor', 1)
result['lines'].append([dimension, name, algorithm, multiplier, divisor])
self.definitions[chart_id] = result
diff --git a/conf.d/python.d/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
index 40b5fb437..40b5fb437 100644
--- a/conf.d/python.d/springboot.conf
+++ b/collectors/python.d.plugin/springboot/springboot.conf
diff --git a/collectors/python.d.plugin/squid/Makefile.inc b/collectors/python.d.plugin/squid/Makefile.inc
new file mode 100644
index 000000000..76ecff81e
--- /dev/null
+++ b/collectors/python.d.plugin/squid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += squid/squid.chart.py
+dist_pythonconfig_DATA += squid/squid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += squid/README.md squid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
new file mode 100644
index 000000000..9c9b62f27
--- /dev/null
+++ b/collectors/python.d.plugin/squid/README.md
@@ -0,0 +1,38 @@
+# squid
+
+This module will monitor one or more squid instances depending on configuration.
+
+It produces the following charts:
+
+1. **Client Bandwidth** in kilobits/s
+ * in
+ * out
+ * hits
+
+2. **Client Requests** in requests/s
+ * requests
+ * hits
+ * errors
+
+3. **Server Bandwidth** in kilobits/s
+ * in
+ * out
+
+4. **Server Requests** in requests/s
+ * requests
+ * errors
+
+### configuration
+
+```yaml
+priority : 50000
+
+local:
+ request : 'cache_object://localhost:3128/counters'
+ host : 'localhost'
+ port : 3128
+```
+
+Without any configuration, the module will try to autodetect where Squid presents its `counters` data.
+
+---
diff --git a/python.d/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
index ba8f982ff..fd54168f0 100644
--- a/python.d/squid.chart.py
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: squid netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
from bases.FrameworkServices.SocketService import SocketService
@@ -15,31 +16,35 @@ ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests']
CHARTS = {
'clients_net': {
- 'options': [None, "Squid Client Bandwidth", "kilobits/s", "clients", "squid.clients_net", "area"],
+ 'options': [None, 'Squid Client Bandwidth', 'kilobits/s', 'clients', 'squid.clients_net', 'area'],
'lines': [
- ["client_http_kbytes_in", "in", "incremental", 8, 1],
- ["client_http_kbytes_out", "out", "incremental", -8, 1],
- ["client_http_hit_kbytes_out", "hits", "incremental", -8, 1]
- ]},
+ ['client_http_kbytes_in', 'in', 'incremental', 8, 1],
+ ['client_http_kbytes_out', 'out', 'incremental', -8, 1],
+ ['client_http_hit_kbytes_out', 'hits', 'incremental', -8, 1]
+ ]
+ },
'clients_requests': {
- 'options': [None, "Squid Client Requests", "requests/s", "clients", "squid.clients_requests", 'line'],
+ 'options': [None, 'Squid Client Requests', 'requests/s', 'clients', 'squid.clients_requests', 'line'],
'lines': [
- ["client_http_requests", "requests", "incremental"],
- ["client_http_hits", "hits", "incremental"],
- ["client_http_errors", "errors", "incremental", -1, 1]
- ]},
+ ['client_http_requests', 'requests', 'incremental'],
+ ['client_http_hits', 'hits', 'incremental'],
+ ['client_http_errors', 'errors', 'incremental', -1, 1]
+ ]
+ },
'servers_net': {
- 'options': [None, "Squid Server Bandwidth", "kilobits/s", "servers", "squid.servers_net", "area"],
+ 'options': [None, 'Squid Server Bandwidth', 'kilobits/s', 'servers', 'squid.servers_net', 'area'],
'lines': [
- ["server_all_kbytes_in", "in", "incremental", 8, 1],
- ["server_all_kbytes_out", "out", "incremental", -8, 1]
- ]},
+ ['server_all_kbytes_in', 'in', 'incremental', 8, 1],
+ ['server_all_kbytes_out', 'out', 'incremental', -8, 1]
+ ]
+ },
'servers_requests': {
- 'options': [None, "Squid Server Requests", "requests/s", "servers", "squid.servers_requests", 'line'],
+ 'options': [None, 'Squid Server Requests', 'requests/s', 'servers', 'squid.servers_requests', 'line'],
'lines': [
- ["server_all_requests", "requests", "incremental"],
- ["server_all_errors", "errors", "incremental", -1, 1]
- ]}
+ ['server_all_requests', 'requests', 'incremental'],
+ ['server_all_errors', 'errors', 'incremental', -1, 1]
+ ]
+ }
}
@@ -47,8 +52,8 @@ class Service(SocketService):
def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name)
self._keep_alive = True
- self.request = ""
- self.host = "localhost"
+ self.request = ''
+ self.host = 'localhost'
self.port = 3128
self.order = ORDER
self.definitions = CHARTS
@@ -62,43 +67,43 @@ class Service(SocketService):
data = dict()
try:
- raw = ""
+ raw = ''
for tmp in response.split('\r\n'):
- if tmp.startswith("sample_time"):
+ if tmp.startswith('sample_time'):
raw = tmp
break
if raw.startswith('<'):
- self.error("invalid data received")
+ self.error('invalid data received')
return None
for row in raw.split('\n'):
- if row.startswith(("client", "server.all")):
- tmp = row.split("=")
+ if row.startswith(('client', 'server.all')):
+ tmp = row.split('=')
data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
except (ValueError, AttributeError, TypeError):
- self.error("invalid data received")
+ self.error('invalid data received')
return None
if not data:
- self.error("no data received")
+ self.error('no data received')
return None
return data
def _check_raw_data(self, data):
header = data[:1024].lower()
- if "connection: keep-alive" in header:
+ if 'connection: keep-alive' in header:
self._keep_alive = True
else:
self._keep_alive = False
- if data[-7:] == "\r\n0\r\n\r\n" and "transfer-encoding: chunked" in header: # HTTP/1.1 response
- self.debug("received full response from squid")
+ if data[-7:] == '\r\n0\r\n\r\n' and 'transfer-encoding: chunked' in header: # HTTP/1.1 response
+ self.debug('received full response from squid')
return True
- self.debug("waiting more data from squid")
+ self.debug('waiting more data from squid')
return False
def check(self):
@@ -109,10 +114,10 @@ class Service(SocketService):
self._parse_config()
# format request
req = self.request.decode()
- if not req.startswith("GET"):
- req = "GET " + req
- if not req.endswith(" HTTP/1.1\r\n\r\n"):
- req += " HTTP/1.1\r\n\r\n"
+ if not req.startswith('GET'):
+ req = 'GET ' + req
+ if not req.endswith(' HTTP/1.1\r\n\r\n'):
+ req += ' HTTP/1.1\r\n\r\n'
self.request = req.encode()
if self._get_data() is not None:
return True
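A trimmed, standalone sketch of the counter parsing in `_get_data()` above; the response body is invented but mirrors the `key = value` shape of Squid's `counters` output:

```python
# Sketch of the counters parsing; the response body is invented but
# follows the `key = value` shape of Squid's cache_object counters.
response = (
    'client_http.requests = 10\n'
    'client_http.kbytes_in = 20\n'
    'server.all.requests = 5\n'
    'server.other.requests = 99\n'  # skipped: not client* / server.all*
)

data = {}
for row in response.split('\n'):
    if row.startswith(('client', 'server.all')):
        key, value = row.split('=')
        # dots become underscores so keys match the chart dimension ids
        data[key.replace('.', '_').strip(' ')] = int(value)

print(data)
# {'client_http_requests': 10, 'client_http_kbytes_in': 20, 'server_all_requests': 5}
```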
diff --git a/conf.d/python.d/squid.conf b/collectors/python.d.plugin/squid/squid.conf
index 564187f00..564187f00 100644
--- a/conf.d/python.d/squid.conf
+++ b/collectors/python.d.plugin/squid/squid.conf
diff --git a/collectors/python.d.plugin/tomcat/Makefile.inc b/collectors/python.d.plugin/tomcat/Makefile.inc
new file mode 100644
index 000000000..940a7835e
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += tomcat/tomcat.chart.py
+dist_pythonconfig_DATA += tomcat/tomcat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc
+
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
new file mode 100644
index 000000000..e548bd338
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -0,0 +1,33 @@
+# tomcat
+
+Presents memory utilization of Tomcat containers.
+
+Charts:
+
+1. **Requests** per second
+ * accesses
+
+2. **Volume** in KB/s
+ * volume
+
+3. **Threads**
+ * current
+ * busy
+
+4. **JVM Free Memory** in MB
+ * jvm
+
+### configuration
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+ user : 'tomcat_username'
+ pass : 'secret_tomcat_password'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/manager/status?XML=true` without any credentials, so it will probably fail.
+
+---
diff --git a/python.d/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
index a570d5643..3c2d0ed40 100644
--- a/python.d/tomcat.chart.py
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# Description: tomcat netdata python.d module
# Author: Pawel Krupa (paulfantom)
+# Author: Wei He (Wing924)
+# SPDX-License-Identifier: GPL-3.0-or-later
import xml.etree.ElementTree as ET
@@ -16,67 +18,75 @@ ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden
CHARTS = {
'accesses': {
- 'options': [None, "Requests", "requests/s", "statistics", "tomcat.accesses", "area"],
+ 'options': [None, 'Requests', 'requests/s', 'statistics', 'tomcat.accesses', 'area'],
'lines': [
- ["requestCount", 'accesses', 'incremental'],
- ["errorCount", 'errors', 'incremental'],
- ]},
+ ['requestCount', 'accesses', 'incremental'],
+ ['errorCount', 'errors', 'incremental'],
+ ]
+ },
'bandwidth': {
- 'options': [None, "Bandwidth", "KB/s", "statistics", "tomcat.bandwidth", "area"],
+ 'options': [None, 'Bandwidth', 'KB/s', 'statistics', 'tomcat.bandwidth', 'area'],
'lines': [
- ["bytesSent", 'sent', 'incremental', 1, 1024],
- ["bytesReceived", 'received', 'incremental', 1, 1024],
- ]},
+ ['bytesSent', 'sent', 'incremental', 1, 1024],
+ ['bytesReceived', 'received', 'incremental', 1, 1024],
+ ]
+ },
'processing_time': {
- 'options': [None, "processing time", "seconds", "statistics", "tomcat.processing_time", "area"],
+ 'options': [None, 'processing time', 'seconds', 'statistics', 'tomcat.processing_time', 'area'],
'lines': [
- ["processingTime", 'processing time', 'incremental', 1, 1000]
- ]},
+ ['processingTime', 'processing time', 'incremental', 1, 1000]
+ ]
+ },
'threads': {
- 'options': [None, "Threads", "current threads", "statistics", "tomcat.threads", "area"],
+ 'options': [None, 'Threads', 'current threads', 'statistics', 'tomcat.threads', 'area'],
'lines': [
- ["currentThreadCount", 'current', "absolute"],
- ["currentThreadsBusy", 'busy', "absolute"]
- ]},
+ ['currentThreadCount', 'current', 'absolute'],
+ ['currentThreadsBusy', 'busy', 'absolute']
+ ]
+ },
'jvm': {
- 'options': [None, "JVM Memory Pool Usage", "MB", "memory", "tomcat.jvm", "stacked"],
+ 'options': [None, 'JVM Memory Pool Usage', 'MB', 'memory', 'tomcat.jvm', 'stacked'],
'lines': [
- ["free", 'free', "absolute", 1, 1048576],
- ["eden_used", 'eden', "absolute", 1, 1048576],
- ["survivor_used", 'survivor', "absolute", 1, 1048576],
- ["tenured_used", 'tenured', "absolute", 1, 1048576],
- ["code_cache_used", 'code cache', "absolute", 1, 1048576],
- ["compressed_used", 'compressed', "absolute", 1, 1048576],
- ["metaspace_used", 'metaspace', "absolute", 1, 1048576],
- ]},
+ ['free', 'free', 'absolute', 1, 1048576],
+ ['eden_used', 'eden', 'absolute', 1, 1048576],
+ ['survivor_used', 'survivor', 'absolute', 1, 1048576],
+ ['tenured_used', 'tenured', 'absolute', 1, 1048576],
+ ['code_cache_used', 'code cache', 'absolute', 1, 1048576],
+ ['compressed_used', 'compressed', 'absolute', 1, 1048576],
+ ['metaspace_used', 'metaspace', 'absolute', 1, 1048576],
+ ]
+ },
'jvm_eden': {
- 'options': [None, "Eden Memory Usage", "MB", "memory", "tomcat.jvm_eden", "area"],
+ 'options': [None, 'Eden Memory Usage', 'MB', 'memory', 'tomcat.jvm_eden', 'area'],
'lines': [
- ["eden_used", 'used', "absolute", 1, 1048576],
- ["eden_commited", 'commited', "absolute", 1, 1048576],
- ["eden_max", 'max', "absolute", 1, 1048576]
- ]},
+ ['eden_used', 'used', 'absolute', 1, 1048576],
+ ['eden_committed', 'committed', 'absolute', 1, 1048576],
+ ['eden_max', 'max', 'absolute', 1, 1048576]
+ ]
+ },
'jvm_survivor': {
- 'options': [None, "Survivor Memory Usage", "MB", "memory", "tomcat.jvm_survivor", "area"],
+ 'options': [None, 'Survivor Memory Usage', 'MB', 'memory', 'tomcat.jvm_survivor', 'area'],
'lines': [
- ["survivor_used", 'used', "absolute", 1, 1048576],
- ["survivor_commited", 'commited', "absolute", 1, 1048576],
- ["survivor_max", 'max', "absolute", 1, 1048576]
- ]},
+ ['survivor_used', 'used', 'absolute', 1, 1048576],
+ ['survivor_committed', 'committed', 'absolute', 1, 1048576],
+ ['survivor_max', 'max', 'absolute', 1, 1048576]
+ ]
+ },
'jvm_tenured': {
- 'options': [None, "Tenured Memory Usage", "MB", "memory", "tomcat.jvm_tenured", "area"],
+ 'options': [None, 'Tenured Memory Usage', 'MB', 'memory', 'tomcat.jvm_tenured', 'area'],
'lines': [
- ["tenured_used", 'used', "absolute", 1, 1048576],
- ["tenured_commited", 'commited', "absolute", 1, 1048576],
- ["tenured_max", 'max', "absolute", 1, 1048576]
- ]},
+ ['tenured_used', 'used', 'absolute', 1, 1048576],
+ ['tenured_committed', 'committed', 'absolute', 1, 1048576],
+ ['tenured_max', 'max', 'absolute', 1, 1048576]
+ ]
+ }
}
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', "http://127.0.0.1:8080/manager/status?XML=true")
+ self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
self.connector_name = self.configuration.get('connector_name', None)
self.order = ORDER
self.definitions = CHARTS
@@ -115,27 +125,27 @@ class Service(UrlService):
name = pool.get('name')
if 'Eden Space' in name:
data['eden_used'] = pool.get('usageUsed')
- data['eden_commited'] = pool.get('usageCommitted')
+ data['eden_committed'] = pool.get('usageCommitted')
data['eden_max'] = pool.get('usageMax')
elif 'Survivor Space' in name:
data['survivor_used'] = pool.get('usageUsed')
- data['survivor_commited'] = pool.get('usageCommitted')
+ data['survivor_committed'] = pool.get('usageCommitted')
data['survivor_max'] = pool.get('usageMax')
elif 'Tenured Gen' in name or 'Old Gen' in name:
data['tenured_used'] = pool.get('usageUsed')
- data['tenured_commited'] = pool.get('usageCommitted')
+ data['tenured_committed'] = pool.get('usageCommitted')
data['tenured_max'] = pool.get('usageMax')
elif name == 'Code Cache':
data['code_cache_used'] = pool.get('usageUsed')
- data['code_cache_commited'] = pool.get('usageCommitted')
+ data['code_cache_committed'] = pool.get('usageCommitted')
data['code_cache_max'] = pool.get('usageMax')
elif name == 'Compressed':
data['compressed_used'] = pool.get('usageUsed')
- data['compressed_commited'] = pool.get('usageCommitted')
+ data['compressed_committed'] = pool.get('usageCommitted')
data['compressed_max'] = pool.get('usageMax')
elif name == 'Metaspace':
data['metaspace_used'] = pool.get('usageUsed')
- data['metaspace_commited'] = pool.get('usageCommitted')
+ data['metaspace_committed'] = pool.get('usageCommitted')
data['metaspace_max'] = pool.get('usageMax')
if connector:
@@ -145,9 +155,9 @@ class Service(UrlService):
request_info = connector.find('requestInfo')
data['processingTime'] = request_info.get('processingTime')
- data['requestCount'] = request_info.get('requestCount')
- data['errorCount'] = request_info.get('errorCount')
- data['bytesReceived'] = request_info.get('bytesReceived')
- data['bytesSent'] = request_info.get('bytesSent')
+ data['requestCount'] = request_info.get('requestCount')
+ data['errorCount'] = request_info.get('errorCount')
+ data['bytesReceived'] = request_info.get('bytesReceived')
+ data['bytesSent'] = request_info.get('bytesSent')
return data or None
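For context, a minimal hedged sketch of the ElementTree traversal the module performs for memory pools, run against an invented fragment of the `/manager/status?XML=true` document:

```python
# Sketch: parse an invented fragment of Tomcat's status XML the way
# _get_data() handles the JVM memory pools.
import xml.etree.ElementTree as ET

XML = """<status>
  <jvm>
    <memorypool name='Eden Space' usageUsed='1024' usageCommitted='2048' usageMax='4096'/>
    <memorypool name='Tenured Gen' usageUsed='512' usageCommitted='1024' usageMax='8192'/>
  </jvm>
</status>"""

data = {}
for pool in ET.fromstring(XML).find('jvm').findall('memorypool'):
    name = pool.get('name')
    if 'Eden Space' in name:
        data['eden_used'] = pool.get('usageUsed')
        data['eden_committed'] = pool.get('usageCommitted')
        data['eden_max'] = pool.get('usageMax')
    elif 'Tenured Gen' in name or 'Old Gen' in name:
        data['tenured_used'] = pool.get('usageUsed')
        data['tenured_committed'] = pool.get('usageCommitted')
        data['tenured_max'] = pool.get('usageMax')

print(data)  # attribute values arrive as strings; netdata coerces them
```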
diff --git a/conf.d/python.d/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
index c63f06cfa..c63f06cfa 100644
--- a/conf.d/python.d/tomcat.conf
+++ b/collectors/python.d.plugin/tomcat/tomcat.conf
diff --git a/collectors/python.d.plugin/traefik/Makefile.inc b/collectors/python.d.plugin/traefik/Makefile.inc
new file mode 100644
index 000000000..926d56dda
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += traefik/traefik.chart.py
+dist_pythonconfig_DATA += traefik/traefik.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += traefik/README.md traefik/Makefile.inc
+
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
new file mode 100644
index 000000000..9b4a18208
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -0,0 +1,54 @@
+# traefik
+
+This module uses the `health` API to provide statistics.
+
+It produces:
+
+1. **Responses** by statuses
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Responses** by codes
+ * 2xx (successful)
+ * 5xx (internal server errors)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 1xx (informational)
+ * other (non-standard responses)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Requests**/s
+ * request statistics
+
+5. **Total response time**
+ * sum of all response times
+
+6. **Average response time**
+
+7. **Average response time per iteration**
+
+8. **Uptime**
+ * Traefik server uptime
+
+### configuration
+
+Needs only the `url` of the server's `health` endpoint.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 1
+priority : 60000
+
+local:
+ url : 'http://localhost:8080/health'
+ retries : 10
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/health`.
+
+---
diff --git a/python.d/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
index f7c3e223b..dc8933220 100644
--- a/python.d/traefik.chart.py
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: traefik netdata python.d module
# Author: Alexandre Menezes (@ale_menezes)
+# SPDX-License-Identifier: GPL-3.0-or-later
from json import loads
from collections import defaultdict
@@ -32,7 +33,8 @@ CHARTS = {
['redirects', 'redirect', 'incremental'],
['bad_requests', 'bad', 'incremental'],
['other_requests', 'other', 'incremental']
- ]},
+ ]
+ },
'response_codes': {
'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'],
'lines': [
@@ -42,37 +44,45 @@ CHARTS = {
['4xx', None, 'incremental'],
['1xx', None, 'incremental'],
['other', None, 'incremental']
- ]},
+ ]
+ },
'detailed_response_codes': {
- 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes', 'stacked'],
- 'lines': [
- ]},
+ 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes',
+ 'stacked'],
+ 'lines': []
+ },
'requests': {
'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'],
'lines': [
['total_count', 'requests', 'incremental']
- ]},
+ ]
+ },
'total_response_time': {
'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'],
'lines': [
['total_response_time_sec', 'response', 'absolute', 1, 10000]
- ]},
+ ]
+ },
'average_response_time': {
'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'],
'lines': [
['average_response_time_sec', 'response', 'absolute', 1, 1000]
- ]},
+ ]
+ },
'average_response_time_per_iteration': {
- 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings', 'traefik.average_response_time_per_iteration', 'line'],
+ 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings',
+ 'traefik.average_response_time_per_iteration', 'line'],
'lines': [
['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000]
- ]},
+ ]
+ },
'uptime': {
'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'],
'lines': [
['uptime_sec', 'uptime', 'absolute']
- ]}
+ ]
}
+}
HEALTH_STATS = [
'uptime_sec',
@@ -82,6 +92,7 @@ HEALTH_STATS = [
'total_status_code_count'
]
+
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
@@ -116,9 +127,11 @@ class Service(UrlService):
self.data['average_response_time_sec'] *= 1000000
self.data['total_response_time_sec'] *= 10000
if data['total_count'] != self.last_total_count:
- self.data['average_response_time_per_iteration_sec'] = (data['total_response_time_sec'] - self.last_total_response_time) * 1000000 / (data['total_count'] - self.last_total_count)
+ self.data['average_response_time_per_iteration_sec'] = \
+ (data['total_response_time_sec'] - self.last_total_response_time) * \
+ 1000000 / (data['total_count'] - self.last_total_count)
else:
- self.data['average_response_time_per_iteration_sec'] = 0
+ self.data['average_response_time_per_iteration_sec'] = 0
self.last_total_response_time = data['total_response_time_sec']
self.last_total_count = data['total_count']
@@ -165,6 +178,7 @@ class Service(UrlService):
self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
self.data[code] = value
+
def fetch_data_(raw_data, metrics):
data = dict()
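The wrapped expression above is a delta ratio: the response-time change divided by the request-count change between consecutive collections. A small sketch with invented numbers:

```python
# Sketch of the per-iteration average in _get_data(): the response-time
# delta divided by the request-count delta between two collections.
def avg_response_time_per_iteration(total_time, last_time,
                                    total_count, last_count):
    if total_count == last_count:
        # No new requests since the last collection.
        return 0
    # Scaled by 1000000 to keep precision through netdata's integer
    # pipeline; the chart applies a divisor of 10000.
    return (total_time - last_time) * 1000000 / (total_count - last_count)

# Invented numbers: 2.5 extra seconds spent over 500 extra requests.
print(avg_response_time_per_iteration(12.5, 10.0, 1500, 1000))  # 5000.0
```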
diff --git a/conf.d/python.d/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf
index 909b9e549..909b9e549 100644
--- a/conf.d/python.d/traefik.conf
+++ b/collectors/python.d.plugin/traefik/traefik.conf
diff --git a/collectors/python.d.plugin/unbound/Makefile.inc b/collectors/python.d.plugin/unbound/Makefile.inc
new file mode 100644
index 000000000..59c306aed
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += unbound/unbound.chart.py
+dist_pythonconfig_DATA += unbound/unbound.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += unbound/README.md unbound/Makefile.inc
+
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
new file mode 100644
index 000000000..3b4fa16fd
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/README.md
@@ -0,0 +1,76 @@
+# unbound
+
+This module uses Unbound's remote control interface to fetch statistics.
+
+Provides the following charts:
+
+1. **Queries Processed**
+ * Ratelimited
+ * Cache Misses
+ * Cache Hits
+ * Expired
+ * Prefetched
+ * Recursive
+
+2. **Request List**
+ * Average Size
+ * Max Size
+ * Overwritten Requests
+ * Overruns
+ * Current Size
+ * User Requests
+
+3. **Recursion Timings**
+ * Average recursion processing time
+ * Median recursion processing time
+
+If extended stats are enabled, also provides:
+
+4. **Cache Sizes**
+ * Message Cache
+ * RRset Cache
+ * Infra Cache
+ * DNSSEC Key Cache
+ * DNSCrypt Shared Secret Cache
+ * DNSCrypt Nonce Cache
+
+### configuration
+
+Unbound must be manually configured to enable the remote-control protocol.
+Check the Unbound documentation for info on how to do this. Additionally,
+if you want to take advantage of the autodetection this plugin offers,
+you will need to make sure your `unbound.conf` file only uses spaces for
+indentation (the default config shipped by most distributions uses tabs
+instead of spaces).
+
+Once you have the Unbound control protocol enabled, you need to make sure
+that either the certificate and key are readable by Netdata (if you're
+using the regular control interface), or that the socket is accessible
+to Netdata (if you're using a UNIX socket for the control interface).
+
+By default, for the local system, everything can be auto-detected
+assuming Unbound is configured correctly and has been told to listen
+on the loopback interface or a UNIX socket. This is done by looking
+up info in the Unbound config file specified by the `ubconf` key.
+
+To enable extended stats for a given job, add `extended: yes` to the
+definition.
+
+You can also enable per-thread charts for a given job by adding
+`per_thread: yes` to the definition. Note that the number of threads
+is only checked on startup.
+
+A basic local configuration with extended statistics and per-thread
+charts looks like this:
+
+```yaml
+local:
+ ubconf: /etc/unbound/unbound.conf
+ extended: yes
+ per_thread: yes
+```
+
+While it's a bit more complicated to set up correctly, it is recommended
+that you use a UNIX socket as it provides far better performance.
+
+---
diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
new file mode 100644
index 000000000..52fcbf7e2
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.chart.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# Description: unbound netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import sys
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.loaders import YamlOrderedLoader
+
+PRECISION = 1000
+
+ORDER = ['queries', 'recursion', 'reqlist']
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'Queries Processed', 'queries', 'Unbound', 'unbound.queries', 'line'],
+ 'lines': [
+ ['ratelimit', 'ratelimited', 'absolute', 1, 1],
+ ['cachemiss', 'cache_miss', 'absolute', 1, 1],
+ ['cachehit', 'cache_hit', 'absolute', 1, 1],
+ ['expired', 'expired', 'absolute', 1, 1],
+ ['prefetch', 'prefetched', 'absolute', 1, 1],
+ ['recursive', 'recursive', 'absolute', 1, 1]
+ ]
+ },
+ 'recursion': {
+ 'options': [None, 'Recursion Timings', 'seconds', 'Unbound', 'unbound.recursion', 'line'],
+ 'lines': [
+ ['recursive_avg', 'average', 'absolute', 1, PRECISION],
+ ['recursive_med', 'median', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'reqlist': {
+ 'options': [None, 'Request List', 'items', 'Unbound', 'unbound.reqlist', 'line'],
+ 'lines': [
+ ['reqlist_avg', 'average_size', 'absolute', 1, 1],
+ ['reqlist_max', 'maximum_size', 'absolute', 1, 1],
+ ['reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+ ['reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+ ['reqlist_current', 'current_size', 'absolute', 1, 1],
+ ['reqlist_user', 'user_requests', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# These are added in addition to the above if extended stats are enabled.
+EXTENDED_ORDER = ['cache']
+
+EXTENDED_CHARTS = {
+ 'cache': {
+ 'options': [None, 'Cache Sizes', 'items', 'Unbound', 'unbound.cache', 'stacked'],
+ 'lines': [
+ ['cache_message', 'message_cache', 'absolute', 1, 1],
+ ['cache_rrset', 'rrset_cache', 'absolute', 1, 1],
+ ['cache_infra', 'infra_cache', 'absolute', 1, 1],
+ ['cache_key', 'dnssec_key_cache', 'absolute', 1, 1],
+ ['cache_dnscss', 'dnscrypt_Shared_Secret_cache', 'absolute', 1, 1],
+ ['cache_dnscn', 'dnscrypt_Nonce_cache', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# These are used as templates for the per-thread charts.
+PER_THREAD_CHARTS = {
+ '_queries': {
+ 'options': [None, '{longname} Queries Processed', 'queries', 'Queries Processed',
+ 'unbound.threads.queries', 'line'],
+ 'lines': [
+ ['{shortname}_ratelimit', 'ratelimited', 'absolute', 1, 1],
+ ['{shortname}_cachemiss', 'cache_miss', 'absolute', 1, 1],
+ ['{shortname}_cachehit', 'cache_hit', 'absolute', 1, 1],
+ ['{shortname}_expired', 'expired', 'absolute', 1, 1],
+ ['{shortname}_prefetch', 'prefetched', 'absolute', 1, 1],
+ ['{shortname}_recursive', 'recursive', 'absolute', 1, 1]
+ ]
+ },
+ '_recursion': {
+ 'options': [None, '{longname} Recursion Timings', 'seconds', 'Recursive Timings',
+ 'unbound.threads.recursion', 'line'],
+ 'lines': [
+ ['{shortname}_recursive_avg', 'average', 'absolute', 1, PRECISION],
+ ['{shortname}_recursive_med', 'median', 'absolute', 1, PRECISION]
+ ]
+ },
+ '_reqlist': {
+ 'options': [None, '{longname} Request List', 'items', 'Request List', 'unbound.threads.reqlist', 'line'],
+ 'lines': [
+ ['{shortname}_reqlist_avg', 'average_size', 'absolute', 1, 1],
+ ['{shortname}_reqlist_max', 'maximum_size', 'absolute', 1, 1],
+ ['{shortname}_reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+ ['{shortname}_reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+ ['{shortname}_reqlist_current', 'current_size', 'absolute', 1, 1],
+ ['{shortname}_reqlist_user', 'user_requests', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+# This maps the Unbound stat names to our names and precision requirements.
+STAT_MAP = {
+ 'total.num.queries_ip_ratelimited': ('ratelimit', 1),
+ 'total.num.cachehits': ('cachehit', 1),
+ 'total.num.cachemiss': ('cachemiss', 1),
+ 'total.num.zero_ttl': ('expired', 1),
+ 'total.num.prefetch': ('prefetch', 1),
+ 'total.num.recursivereplies': ('recursive', 1),
+ 'total.requestlist.avg': ('reqlist_avg', 1),
+ 'total.requestlist.max': ('reqlist_max', 1),
+ 'total.requestlist.overwritten': ('reqlist_overwritten', 1),
+ 'total.requestlist.exceeded': ('reqlist_exceeded', 1),
+ 'total.requestlist.current.all': ('reqlist_current', 1),
+ 'total.requestlist.current.user': ('reqlist_user', 1),
+ 'total.recursion.time.avg': ('recursive_avg', PRECISION),
+ 'total.recursion.time.median': ('recursive_med', PRECISION),
+ 'msg.cache.count': ('cache_message', 1),
+ 'rrset.cache.count': ('cache_rrset', 1),
+ 'infra.cache.count': ('cache_infra', 1),
+ 'key.cache.count': ('cache_key', 1),
+ 'dnscrypt_shared_secret.cache.count': ('cache_dnscss', 1),
+ 'dnscrypt_nonce.cache.count': ('cache_dnscn', 1)
+}
+
+# Same as above, but for per-thread stats.
+PER_THREAD_STAT_MAP = {
+ '{shortname}.num.queries_ip_ratelimited': ('{shortname}_ratelimit', 1),
+ '{shortname}.num.cachehits': ('{shortname}_cachehit', 1),
+ '{shortname}.num.cachemiss': ('{shortname}_cachemiss', 1),
+ '{shortname}.num.zero_ttl': ('{shortname}_expired', 1),
+ '{shortname}.num.prefetch': ('{shortname}_prefetch', 1),
+ '{shortname}.num.recursivereplies': ('{shortname}_recursive', 1),
+ '{shortname}.requestlist.avg': ('{shortname}_reqlist_avg', 1),
+ '{shortname}.requestlist.max': ('{shortname}_reqlist_max', 1),
+ '{shortname}.requestlist.overwritten': ('{shortname}_reqlist_overwritten', 1),
+ '{shortname}.requestlist.exceeded': ('{shortname}_reqlist_exceeded', 1),
+ '{shortname}.requestlist.current.all': ('{shortname}_reqlist_current', 1),
+ '{shortname}.requestlist.current.user': ('{shortname}_reqlist_user', 1),
+ '{shortname}.recursion.time.avg': ('{shortname}_recursive_avg', PRECISION),
+ '{shortname}.recursion.time.median': ('{shortname}_recursive_med', PRECISION)
+}
+
+
+# Used to actually generate per-thread charts.
+def _get_perthread_info(thread):
+ sname = 'thread{0}'.format(thread)
+ lname = 'Thread {0}'.format(thread)
+ charts = dict()
+ order = []
+ statmap = dict()
+
+ for item in PER_THREAD_CHARTS:
+ cname = '{0}{1}'.format(sname, item)
+ chart = deepcopy(PER_THREAD_CHARTS[item])
+ chart['options'][1] = chart['options'][1].format(longname=lname)
+
+ for index, line in enumerate(chart['lines']):
+ chart['lines'][index][0] = line[0].format(shortname=sname)
+
+ order.append(cname)
+ charts[cname] = chart
+
+ for key, value in PER_THREAD_STAT_MAP.items():
+ statmap[key.format(shortname=sname)] = (value[0].format(shortname=sname), value[1])
+
+ return (charts, order, statmap)
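+# For example (a sketch of the generated names), _get_perthread_info(0)
+# produces chart keys such as 'thread0_queries' with dimension IDs such as
+# 'thread0_cachehit', plus statmap entries mapping Unbound stats such as
+# 'thread0.num.cachehits' to ('thread0_cachehit', 1).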
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ # The unbound control protocol is always TLS encapsulated
+ # unless it's used over a UNIX socket, so enable TLS _before_
+ # doing the normal SocketService initialization.
+ configuration['tls'] = True
+ self.port = 8953  # Unbound's default remote-control port
+ SocketService.__init__(self, configuration, name)
+ self.ext = self.configuration.get('extended', None)
+ self.ubconf = self.configuration.get('ubconf', None)
+ self.perthread = self.configuration.get('per_thread', False)
+ self.threads = None
+ self.order = deepcopy(ORDER)
+ self.definitions = deepcopy(CHARTS)
+ self.request = 'UBCT1 stats\n'
+ self.statmap = deepcopy(STAT_MAP)
+ self._parse_config()
+ self._auto_config()
+ self.debug('Extended stats: {0}'.format(self.ext))
+ self.debug('Per-thread stats: {0}'.format(self.perthread))
+ if self.ext:
+ self.order = self.order + EXTENDED_ORDER
+ self.definitions.update(EXTENDED_CHARTS)
+ if self.unix_socket:
+ self.debug('Using unix socket: {0}'.format(self.unix_socket))
+ else:
+ self.debug('Connecting to: {0}:{1}'.format(self.host, self.port))
+ self.debug('Using key: {0}'.format(self.key))
+ self.debug('Using certificate: {0}'.format(self.cert))
+
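+ # Fill in anything the user did not set explicitly: first from the
+ # Unbound config file (if one is readable), then from Unbound's
+ # usual defaults.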
+ def _auto_config(self):
+ if self.ubconf and os.access(self.ubconf, os.R_OK):
+ self.debug('Unbound config: {0}'.format(self.ubconf))
+ conf = YamlOrderedLoader.load_config_from_file(self.ubconf)[0]
+ if self.ext is None:
+ if 'extended-statistics' in conf['server']:
+ self.ext = conf['server']['extended-statistics']
+ if 'remote-control' in conf:
+ if conf['remote-control'].get('control-use-cert', False):
+ self.key = self.key or conf['remote-control'].get('control-key-file')
+ self.cert = self.cert or conf['remote-control'].get('control-cert-file')
+ self.port = self.port or conf['remote-control'].get('control-port')
+ else:
+ self.unix_socket = self.unix_socket or conf['remote-control'].get('control-interface')
+ else:
+ self.debug('Unbound configuration not found.')
+ if not self.key:
+ self.key = '/etc/unbound/unbound_control.key'
+ if not self.cert:
+ self.cert = '/etc/unbound/unbound_control.pem'
+ if not self.port:
+ self.port = 8953
+
+ def _generate_perthread_charts(self):
+ tmporder = list()
+ for thread in range(0, self.threads):
+ charts, order, statmap = _get_perthread_info(thread)
+ tmporder.extend(order)
+ self.definitions.update(charts)
+ self.statmap.update(statmap)
+ self.order.extend(sorted(tmporder))
+
+ def check(self):
+ # Check if authentication is working.
+ self._connect()
+ result = bool(self._sock)
+ self._disconnect()
+ # If auth works, and we need per-thread charts, query the server
+ # to see how many threads it's using. This somewhat abuses the
+ # SocketService API to get the data we need.
+ if result and self.perthread:
+ tmp = self.request
+ if sys.version_info[0] < 3:
+ self.request = 'UBCT1 status\n'
+ else:
+ self.request = b'UBCT1 status\n'
+ raw = self._get_raw_data()
+ # _get_raw_data() returns None on failure, so guard against that here.
+ for line in (raw or '').splitlines():
+ if line.startswith('threads'):
+ self.threads = int(line.split()[1])
+ self._generate_perthread_charts()
+ break
+ if self.threads is None:
+ self.info('Unable to auto-detect thread counts, disabling per-thread stats.')
+ self.perthread = False
+ self.request = tmp
+ return result
+
+ @staticmethod
+ def _check_raw_data(data):
+ # The server will close the connection when it's done sending
+ # data, so just keep looping until that happens.
+ return False
+
+ def _get_data(self):
+ raw = self._get_raw_data()
+ # The connection may have failed, in which case there is no data.
+ if raw is None:
+     return None
+ data = dict()
+ tmp = dict()
+ for line in raw.splitlines():
+ stat = line.split('=')
+ tmp[stat[0]] = stat[1]
+ for item in self.statmap:
+ if item in tmp:
+ data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1]
+ return data
diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf
new file mode 100644
index 000000000..46c4b097f
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.conf
@@ -0,0 +1,87 @@
+# netdata python.d.plugin configuration for unbound
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, unbound also supports the following:
+#
+# host: localhost # The host to connect to.
+# port: 8953 # What port to use (defaults to 8953)
+# socket: /path/to/socket # A path to a UNIX socket to use instead
+# # of a TCP connection
+# tls_key_file: /path/to/key # The key file to use for authentication
+# tls_cert_file: /path/to/cert # The certificate to use for authentication
+# extended: false # Whether to collect extended stats or not
+# per_thread: false # Whether to show charts for per-thread stats
+#
+# In addition to the above, you can set the following to try and
+# auto-detect most settings based on the unbound configuration:
+#
+# ubconf: /etc/unbound/unbound.conf
+#
+# Note that the SSL key and certificate need to be readable by the user
+# netdata runs as if you're using the regular control interface.
+# If you're using a UNIX socket, that has to be readable by the netdata user.
+
+# The following should work for most users if they have unbound configured
+# correctly.
+local:
+ ubconf: /etc/unbound/unbound.conf
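+
+# A sketch of a job for a remote Unbound instance (the host and port are
+# placeholder assumptions; the key/certificate paths are the module's
+# defaults - adjust everything to your setup):
+#
+# remote:
+#   host: 203.0.113.10
+#   port: 8953
+#   tls_key_file: /etc/unbound/unbound_control.key
+#   tls_cert_file: /etc/unbound/unbound_control.pem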
diff --git a/collectors/python.d.plugin/uwsgi/Makefile.inc b/collectors/python.d.plugin/uwsgi/Makefile.inc
new file mode 100644
index 000000000..75d96de0e
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += uwsgi/uwsgi.chart.py
+dist_pythonconfig_DATA += uwsgi/uwsgi.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += uwsgi/README.md uwsgi/Makefile.inc
+
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
new file mode 100644
index 000000000..a062710df
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -0,0 +1,37 @@
+# uwsgi
+
+This module monitors uWSGI performance metrics.
+
+https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
+
+Chart lines are created dynamically based on the number of workers.
+
+The following charts are drawn:
+
+1. **Requests**
+ * requests per second
+ * transmitted data
+ * average request time
+
+2. **Memory**
+ * rss
+ * vsz
+
+3. **Exceptions**
+4. **Harakiris**
+5. **Respawns**
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:1717`.
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
new file mode 100644
index 000000000..5ebcfb55b
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+# Description: uwsgi netdata python.d module
+# Author: Robbert Segeren (robbert-ef)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+from copy import deepcopy
+from bases.FrameworkServices.SocketService import SocketService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+ORDER = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+ 'exceptions',
+ 'harakiri',
+ 'respawn',
+]
+
+DYNAMIC_CHARTS = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+]
+
+# NOTE: lines are created dynamically in `check()` method
+CHARTS = {
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'uwsgi.requests', 'stacked'],
+ 'lines': [
+ ['requests', 'requests', 'incremental']
+ ]
+ },
+ 'tx': {
+ 'options': [None, 'Transmitted data', 'KB/s', 'requests', 'uwsgi.tx', 'stacked'],
+ 'lines': [
+ ['tx', 'tx', 'incremental']
+ ]
+ },
+ 'avg_rt': {
+ 'options': [None, 'Average request time', 'ms', 'requests', 'uwsgi.avg_rt', 'line'],
+ 'lines': [
+ ['avg_rt', 'avg_rt', 'absolute']
+ ]
+ },
+ 'memory_rss': {
+ 'options': [None, 'RSS (Resident Set Size)', 'MB', 'memory', 'uwsgi.memory_rss', 'stacked'],
+ 'lines': [
+ ['memory_rss', 'memory_rss', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'memory_vsz': {
+ 'options': [None, 'VSZ (Virtual Memory Size)', 'MB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
+ 'lines': [
+ ['memory_vsz', 'memory_vsz', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'exceptions': {
+ 'options': [None, 'Exceptions', 'exceptions', 'exceptions', 'uwsgi.exceptions', 'line'],
+ 'lines': [
+ ['exceptions', 'exceptions', 'incremental']
+ ]
+ },
+ 'harakiri': {
+ 'options': [None, 'Harakiris', 'harakiris', 'harakiris', 'uwsgi.harakiris', 'line'],
+ 'lines': [
+ ['harakiri_count', 'harakiris', 'incremental']
+ ]
+ },
+ 'respawn': {
+ 'options': [None, 'Respawns', 'respawns', 'respawns', 'uwsgi.respawns', 'line'],
+ 'lines': [
+ ['respawn_count', 'respawns', 'incremental']
+ ]
+ },
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.url = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 1717)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+
+ # Clear dynamic dimensions; these are added during `_get_data()` to allow adding workers at run-time
+ for chart in DYNAMIC_CHARTS:
+ self.definitions[chart]['lines'] = []
+
+ self.last_result = {}
+ self.workers = []
+
+ def read_data(self):
+ """
+ Read data from socket and parse as JSON.
+ :return: (dict) stats
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ try:
+ return json.loads(raw_data)
+ except ValueError as err:
+ self.error(err)
+ return None
+
+ def check(self):
+ """
+ Parse configuration and check if we can read data.
+ :return: boolean
+ """
+ self._parse_config()
+ return bool(self.read_data())
+
+ def add_worker_dimensions(self, key):
+ """
+ Helper to add dimensions for a worker.
+ :param key: (int or str) worker identifier
+ :return:
+ """
+ for chart in DYNAMIC_CHARTS:
+ for line in CHARTS[chart]['lines']:
+ dimension_id = '{}_{}'.format(line[0], key)
+ dimension_name = str(key)
+
+ dimension = [dimension_id, dimension_name] + line[2:]
+ self.charts[chart].add_dimension(dimension)
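+ # For example, a worker with pid 1234 gets the dimensions
+ # 'requests_1234', 'tx_1234', 'avg_rt_1234', 'memory_rss_1234' and
+ # 'memory_vsz_1234', matching the keys built in _get_data().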
+
+ @staticmethod
+ def _check_raw_data(data):
+ # The server will close the connection when it's done sending
+ # data, so just keep looping until that happens.
+ return False
+
+ def _get_data(self):
+ """
+ Read data from socket
+ :return: dict
+ """
+ stats = self.read_data()
+ if not stats:
+ return None
+
+ result = {
+ 'exceptions': 0,
+ 'harakiri_count': 0,
+ 'respawn_count': 0,
+ }
+
+ for worker in stats['workers']:
+ key = worker['pid']
+
+ # Add dimensions for new workers
+ if key not in self.workers:
+ self.add_worker_dimensions(key)
+ self.workers.append(key)
+
+ result['requests_{}'.format(key)] = worker['requests']
+ result['tx_{}'.format(key)] = worker['tx']
+ result['avg_rt_{}'.format(key)] = worker['avg_rt']
+
+ # avg_rt is not reset by uwsgi, so reset here
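+ # (if the request count did not change since the last poll, the
+ # worker was idle and the reported average would be stale)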
+ if self.last_result.get('requests_{}'.format(key)) == worker['requests']:
+ result['avg_rt_{}'.format(key)] = 0
+
+ result['memory_rss_{}'.format(key)] = worker['rss']
+ result['memory_vsz_{}'.format(key)] = worker['vsz']
+
+ result['exceptions'] += worker['exceptions']
+ result['harakiri_count'] += worker['harakiri_count']
+ result['respawn_count'] += worker['respawn_count']
+
+ self.last_result = result
+ return result
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf
new file mode 100644
index 000000000..be1c2ada3
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.conf
@@ -0,0 +1,94 @@
+# netdata python.d.plugin configuration for uwsgi
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, uwsgi also supports the following:
+#
+# socket: 'path/to/uwsgistats.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 1717
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 1717
diff --git a/collectors/python.d.plugin/varnish/Makefile.inc b/collectors/python.d.plugin/varnish/Makefile.inc
new file mode 100644
index 000000000..2469b0592
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += varnish/varnish.chart.py
+dist_pythonconfig_DATA += varnish/varnish.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += varnish/README.md varnish/Makefile.inc
+
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
new file mode 100644
index 000000000..96c7cafaa
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -0,0 +1,69 @@
+# varnish
+
+This module uses the `varnishstat` command to provide Varnish cache statistics.
+
+It produces:
+
+1. **Connections Statistics** in connections/s
+ * accepted
+ * dropped
+
+2. **Client Requests** in requests/s
+ * received
+
+3. **All History Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+4. **Current Poll Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+5. **Expired Objects** in expired/s
+ * objects
+
+6. **Least Recently Used Nuked Objects** in nuked/s
+ * objects
+
+
+7. **Number Of Threads In All Pools** in threads
+ * threads
+
+8. **Threads Statistics** in threads/s
+ * created
+ * failed
+ * limited
+
+9. **Current Queue Length** in requests
+ * in queue
+
+10. **Backend Connections Statistics** in connections/s
+ * successful
+ * unhealthy
+ * reused
+ * closed
+ * recycled
+ * failed
+
+11. **Requests To The Backend** in requests/s
+ * received
+
+12. **ESI Statistics** in problems/s
+ * errors
+ * warnings
+
+13. **Memory Usage** in MB
+ * free
+ * allocated
+
+14. **Uptime** in seconds
+ * uptime
+
+
+### configuration
+
+No configuration is needed.
+
+---
diff --git a/python.d/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
index d8145c0b6..d889c2b33 100644
--- a/python.d/varnish.chart.py
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Description: varnish netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
import re
@@ -12,13 +13,22 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
priority = 60000
retries = 60
-ORDER = ['session_connections', 'client_requests',
- 'all_time_hit_rate', 'current_poll_hit_rate', 'cached_objects_expired', 'cached_objects_nuked',
- 'threads_total', 'threads_statistics', 'threads_queue_len',
- 'backend_connections', 'backend_requests',
- 'esi_statistics',
- 'memory_usage',
- 'uptime']
+ORDER = [
+ 'session_connections',
+ 'client_requests',
+ 'all_time_hit_rate',
+ 'current_poll_hit_rate',
+ 'cached_objects_expired',
+ 'cached_objects_nuked',
+ 'threads_total',
+ 'threads_statistics',
+ 'threads_queue_len',
+ 'backend_connections',
+ 'backend_requests',
+ 'esi_statistics',
+ 'memory_usage',
+ 'uptime'
+]
CHARTS = {
'session_connections': {
@@ -213,8 +223,9 @@ class Service(ExecutableService):
data.update(dict((param, value) for _, param, value in server_stats))
- data['memory_allocated'] = data['s0.g_bytes']
- data['memory_free'] = data['s0.g_space']
+ # varnish 5 uses default.g_bytes and default.g_space
+ data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
+ data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
return data
diff --git a/conf.d/python.d/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
index 4b069d514..4b069d514 100644
--- a/conf.d/python.d/varnish.conf
+++ b/collectors/python.d.plugin/varnish/varnish.conf
diff --git a/collectors/python.d.plugin/w1sensor/Makefile.inc b/collectors/python.d.plugin/w1sensor/Makefile.inc
new file mode 100644
index 000000000..bddf146f5
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += w1sensor/w1sensor.chart.py
+dist_pythonconfig_DATA += w1sensor/w1sensor.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += w1sensor/README.md w1sensor/Makefile.inc
+
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
new file mode 100644
index 000000000..b18f08351
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -0,0 +1,13 @@
+# w1sensor
+
+Data from 1-Wire sensors.
+On Linux these are supported by the wire, w1_gpio, and w1_therm kernel modules.
+Currently, temperature sensors are supported and automatically detected.
+
+Charts are created dynamically based on the number of detected sensors.
+
+### configuration
+
+For detailed configuration information please read the [`w1sensor.conf`](w1sensor.conf) file.
+
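+Sensors can be given human readable names with `name_<1-Wire id>` keys in
+the job definition, e.g. (a sketch; the job name `sensors` is arbitrary and
+the identifiers reuse the samples from `w1sensor.conf`):
+
+```yaml
+sensors:
+    name_00000022276e: 'Machine room'
+    name_00000022298f: 'Rack 12'
+```
+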
+---
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
new file mode 100644
index 000000000..493c4a135
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Description: 1-wire temperature monitor netdata python.d module
+# Author: Diomidis Spinellis <http://www.spinellis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+# Location where 1-Wire devices can be found
+W1_DIR = '/sys/bus/w1/devices/'
+
+# Lines matching the following regular expression contain a temperature value
+RE_TEMP = re.compile(r' t=(-?\d+)')
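+# A typical w1_slave payload looks like the following (an assumed example
+# for a reading of 23.125 Celsius; only the trailing t= value is used):
+#   72 01 4b 46 7f ff 0e 10 57 : crc=57 YES
+#   72 01 4b 46 7f ff 0e 10 57 t=23125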
+
+ORDER = ['temp']
+
+CHARTS = {
+ 'temp': {
+ 'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'],
+ 'lines': []
+ }
+}
+
+# Known and supported family members
+# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c
+THERM_FAMILY = {
+ '10': 'W1_THERM_DS18S20',
+ '22': 'W1_THERM_DS1822',
+ '28': 'W1_THERM_DS18B20',
+ '3b': 'W1_THERM_DS1825',
+ '42': 'W1_THERM_DS28EA00',
+}
+
+
+class Service(SimpleService):
+ """Provide netdata service for 1-Wire sensors"""
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.probes = []
+
+ def check(self):
+ """Auto-detect available 1-Wire sensors, setting line definitions
+ and probes to be monitored."""
+ try:
+ file_names = os.listdir(W1_DIR)
+ except OSError as err:
+ self.error(err)
+ return False
+
+ lines = []
+ for file_name in file_names:
+ if file_name[2] != '-':
+ continue
+ if file_name[0:2] not in THERM_FAMILY:
+ continue
+
+ self.probes.append(file_name)
+ identifier = file_name[3:]
+ name = identifier
+ config_name = self.configuration.get('name_' + identifier)
+ if config_name:
+ name = config_name
+ lines.append(['w1sensor_temp_' + identifier, name, 'absolute',
+ 1, 10])
+ self.definitions['temp']['lines'] = lines
+ return len(self.probes) > 0
+
+ def get_data(self):
+ """Return data read from sensors."""
+ data = dict()
+
+ for file_name in self.probes:
+ file_path = W1_DIR + file_name + '/w1_slave'
+ identifier = file_name[3:]
+ try:
+ with open(file_path, 'r') as device_file:
+ for line in device_file:
+ matched = RE_TEMP.search(line)
+ if matched:
+ # Round to one decimal digit to filter-out noise
+ value = round(int(matched.group(1)) / 1000., 1)
+ value = int(value * 10)
+ data['w1sensor_temp_' + identifier] = value
+ except (OSError, IOError) as err:
+ self.error(err)
+ continue
+ return data or None
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
new file mode 100644
index 000000000..a4aed8dd7
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for w1sensor
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 5 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 60 # the JOB's number of restoration attempts
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, w1sensor also supports the following:
+#
+# name_<1-Wire id>: '<human readable name>'
+# This allows associating a human readable name with a sensor's 1-Wire
+# identifier. Example:
+# name_00000022276e: 'Machine room'
+# name_00000022298f: 'Rack 12'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/web_log/Makefile.inc b/collectors/python.d.plugin/web_log/Makefile.inc
new file mode 100644
index 000000000..893115992
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += web_log/web_log.chart.py
+dist_pythonconfig_DATA += web_log/web_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += web_log/README.md web_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
new file mode 100644
index 000000000..6e8ea1dd5
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -0,0 +1,64 @@
+# web_log
+
+Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
+
+It produces the following charts:
+
+1. **Response by type** requests/s
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Response by code family** requests/s
+ * 1xx (informational)
+ * 2xx (successful)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 5xx (internal server errors)
+ * other (non-standard responses)
+ * unmatched (the lines in the log file that are not matched)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Bandwidth** KB/s
+ * received (bandwidth of requests)
+ * sent (bandwidth of responses)
+
+5. **Timings** ms (request processing time)
+ * min (minimum processing time)
+ * max (maximum processing time)
+ * average (average processing time)
+
+6. **Request per url** requests/s (configured by user)
+
+7. **Http Methods** requests/s (requests per http method)
+
+8. **Http Versions** requests/s (requests per http version)
+
+9. **IP protocols** requests/s (requests per ip protocol version)
+
+10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration)
+
+11. **All Time Unique Client IPs** unique ips (unique client IPs since the last restart of netdata)
+
+
+### configuration
+
+```yaml
+nginx_log:
+ name : 'nginx_log'
+ path : '/var/log/nginx/access.log'
+
+apache_log:
+ name : 'apache_log'
+ path : '/var/log/apache/other_vhosts_access.log'
+ categories:
+ cacti : 'cacti.*'
+ observium : 'observium'
+```
+
+The module has preconfigured jobs for nginx, apache and gunicorn on various distros.
+
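+A custom log format can also be defined per job. The sketch below reuses
+the sample pattern from `web_log.conf` (the job name and path are
+assumptions; named groups such as `address`, `method`, `url`, `code` and
+`bytes_sent` drive the charts):
+
+```yaml
+custom_job:
+ name : 'custom'
+ path : '/var/log/nginx/access.log'
+ custom_log_format:
+   pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+)'
+```
+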
+---
diff --git a/python.d/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
index be9baba92..20e15f4cb 100644
--- a/python.d/web_log.chart.py
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-
# Description: web log netdata python.d module
# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
import bisect
import re
import os
-import sys
from collections import namedtuple, defaultdict
from copy import deepcopy
@@ -16,21 +16,54 @@ except ImportError:
from itertools import ifilter as filter
from itertools import ifilterfalse as filterfalse
+try:
+ from sys import maxint
+except ImportError:
+ from sys import maxsize as maxint
+
from bases.collection import read_last_line
from bases.FrameworkServices.LogService import LogService
ORDER_APACHE_CACHE = ['apache_cache']
-ORDER_WEB = ['response_statuses', 'response_codes', 'bandwidth',
- 'response_time', 'response_time_hist', 'response_time_upstream', 'response_time_upstream_hist',
- 'requests_per_url', 'requests_per_user_defined', 'http_method', 'http_version',
- 'requests_per_ipproto', 'clients', 'clients_all']
-
-ORDER_SQUID = ['squid_response_statuses', 'squid_response_codes', 'squid_detailed_response_codes',
- 'squid_method', 'squid_mime_type', 'squid_hier_code', 'squid_transport_methods',
- 'squid_transport_errors', 'squid_code', 'squid_handling_opts', 'squid_object_types',
- 'squid_cache_events', 'squid_bytes', 'squid_duration', 'squid_clients', 'squid_clients_all']
+ORDER_WEB = [
+ 'response_statuses',
+ 'response_codes',
+ 'bandwidth',
+ 'response_time',
+ 'response_time_hist',
+ 'response_time_upstream',
+ 'response_time_upstream_hist',
+ 'requests_per_url',
+ 'requests_per_user_defined',
+ 'http_method',
+ 'vhost',
+ 'port',
+ 'http_version',
+ 'requests_per_ipproto',
+ 'clients',
+ 'clients_all'
+]
+
+ORDER_SQUID = [
+ 'squid_response_statuses',
+ 'squid_response_codes',
+ 'squid_detailed_response_codes',
+ 'squid_method',
+ 'squid_mime_type',
+ 'squid_hier_code',
+ 'squid_transport_methods',
+ 'squid_transport_errors',
+ 'squid_code',
+ 'squid_handling_opts',
+ 'squid_object_types',
+ 'squid_cache_events',
+ 'squid_bytes',
+ 'squid_duration',
+ 'squid_clients',
+ 'squid_clients_all'
+]
CHARTS_WEB = {
'response_codes': {
@@ -43,24 +76,27 @@ CHARTS_WEB = {
['1xx', None, 'incremental'],
['0xx', 'other', 'incremental'],
['unmatched', None, 'incremental']
- ]},
+ ]
+ },
'bandwidth': {
'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
'lines': [
['resp_length', 'received', 'incremental', 8, 1000],
['bytes_sent', 'sent', 'incremental', -8, 1000]
- ]},
+ ]
+ },
'response_time': {
'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
'lines': [
['resp_time_min', 'min', 'incremental', 1, 1000],
['resp_time_max', 'max', 'incremental', 1, 1000],
['resp_time_avg', 'avg', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'response_time_hist': {
'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'response_time_upstream': {
'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
'web_log.response_time_upstream', 'area'],
@@ -68,62 +104,80 @@ CHARTS_WEB = {
['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'response_time_upstream_hist': {
'options': [None, 'Processing Time Histogram', 'requests/s', 'timings',
'web_log.response_time_upstream_hist', 'line'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'clients': {
'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
'lines': [
['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1],
['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1]
- ]},
+ ]
+ },
'clients_all': {
'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'],
'lines': [
['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1],
['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1]
- ]},
+ ]
+ },
'http_method': {
'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'],
'lines': [
['GET', 'GET', 'incremental', 1, 1]
- ]},
+ ]
+ },
'http_version': {
'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions',
'web_log.http_version', 'stacked'],
- 'lines': []},
+ 'lines': []
+ },
'requests_per_ipproto': {
'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto',
'stacked'],
'lines': [
['req_ipv4', 'ipv4', 'incremental', 1, 1],
['req_ipv6', 'ipv6', 'incremental', 1, 1]
- ]},
+ ]
+ },
'response_statuses': {
- 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses',
- 'stacked'],
+ 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses', 'stacked'],
'lines': [
['successful_requests', 'success', 'incremental', 1, 1],
['server_errors', 'error', 'incremental', 1, 1],
['redirects', 'redirect', 'incremental', 1, 1],
['bad_requests', 'bad', 'incremental', 1, 1],
['other_requests', 'other', 'incremental', 1, 1]
- ]},
+ ]
+ },
'requests_per_url': {
- 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url',
- 'stacked'],
+ 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url', 'stacked'],
'lines': [
['url_pattern_other', 'other', 'incremental', 1, 1]
- ]},
+ ]
+ },
'requests_per_user_defined': {
'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
'web_log.requests_per_user_defined', 'stacked'],
'lines': [
['user_pattern_other', 'other', 'incremental', 1, 1]
- ]}
+ ]
+ },
+ 'port': {
+ 'options': [None, 'Requests Per Port', 'requests/s', 'port', 'web_log.port', 'stacked'],
+ 'lines': [
+ ['port_80', 'http', 'incremental', 1, 1],
+ ['port_443', 'https', 'incremental', 1, 1]
+ ]
+ },
+ 'vhost': {
+ 'options': [None, 'Requests Per Vhost', 'requests/s', 'vhost', 'web_log.vhost', 'stacked'],
+ 'lines': []
+ }
}
CHARTS_APACHE_CACHE = {
@@ -131,10 +185,11 @@ CHARTS_APACHE_CACHE = {
'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache',
'stacked'],
'lines': [
- ["hit", 'cache', "percentage-of-absolute-row"],
- ["miss", None, "percentage-of-absolute-row"],
- ["other", None, "percentage-of-absolute-row"]
- ]}
+ ['hit', 'cache', 'percentage-of-absolute-row'],
+ ['miss', None, 'percentage-of-absolute-row'],
+ ['other', None, 'percentage-of-absolute-row']
+ ]
+ }
}
CHARTS_SQUID = {
@@ -145,13 +200,15 @@ CHARTS_SQUID = {
['duration_min', 'min', 'incremental', 1, 1000],
['duration_max', 'max', 'incremental', 1, 1000],
['duration_avg', 'avg', 'incremental', 1, 1000]
- ]},
+ ]
+ },
'squid_bytes': {
'options': [None, 'Amount Of Data Delivered To The Clients',
'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
'lines': [
['bytes', 'sent', 'incremental', 8, 1000]
- ]},
+ ]
+ },
'squid_response_statuses': {
'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
'stacked'],
@@ -161,7 +218,8 @@ CHARTS_SQUID = {
['redirects', 'redirect', 'incremental', 1, 1],
['bad_requests', 'bad', 'incremental', 1, 1],
['other_requests', 'other', 'incremental', 1, 1]
- ]},
+ ]
+ },
'squid_response_codes': {
'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
'web_log.squid_response_codes', 'stacked'],
@@ -174,89 +232,113 @@ CHARTS_SQUID = {
['0xx', None, 'incremental'],
['other', None, 'incremental'],
['unmatched', None, 'incremental']
- ]},
+ ]
+ },
'squid_code': {
'options': [None, 'Responses Per Cache Result Of The Request',
'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_detailed_response_codes': {
'options': [None, 'Detailed Response Codes',
'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_hier_code': {
'options': [None, 'Responses Per Hierarchy Code',
'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_method': {
'options': [None, 'Requests Per Method',
'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_mime_type': {
'options': [None, 'Requests Per MIME Type',
'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_clients': {
'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
'web_log.squid_clients', 'stacked'],
'lines': [
['unique_ipv4', 'ipv4', 'incremental'],
['unique_ipv6', 'ipv6', 'incremental']
- ]},
+ ]
+ },
'squid_clients_all': {
'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
'web_log.squid_clients_all', 'stacked'],
'lines': [
['unique_tot_ipv4', 'ipv4', 'absolute'],
['unique_tot_ipv6', 'ipv6', 'absolute']
- ]},
+ ]
+ },
'squid_transport_methods': {
'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
'web_log.squid_transport_methods', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_transport_errors': {
'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
'web_log.squid_transport_errors', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_handling_opts': {
'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
'web_log.squid_handling_opts', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_object_types': {
'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
'web_log.squid_object_types', 'stacked'],
- 'lines': [
- ]},
+ 'lines': []
+ },
'squid_cache_events': {
'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
'web_log.squid_cache_events', 'stacked'],
- 'lines': [
- ]}
+ 'lines': []
+ }
}
NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
-SQUID_CODES = dict(TCP='squid_transport_methods', UDP='squid_transport_methods', NONE='squid_transport_methods',
- CLIENT='squid_handling_opts', IMS='squid_handling_opts', ASYNC='squid_handling_opts',
- SWAPFAIL='squid_handling_opts', REFRESH='squid_handling_opts', SHARED='squid_handling_opts',
- REPLY='squid_handling_opts', NEGATIVE='squid_object_types', STALE='squid_object_types',
- OFFLINE='squid_object_types', INVALID='squid_object_types', FAIL='squid_object_types',
- MODIFIED='squid_object_types', UNMODIFIED='squid_object_types', REDIRECT='squid_object_types',
- HIT='squid_cache_events', MEM='squid_cache_events', MISS='squid_cache_events',
- DENIED='squid_cache_events', NOFETCH='squid_cache_events', TUNNEL='squid_cache_events',
- ABORTED='squid_transport_errors', TIMEOUT='squid_transport_errors')
+SQUID_CODES = {
+ 'TCP': 'squid_transport_methods',
+ 'UDP': 'squid_transport_methods',
+ 'NONE': 'squid_transport_methods',
+ 'CLIENT': 'squid_handling_opts',
+ 'IMS': 'squid_handling_opts',
+ 'ASYNC': 'squid_handling_opts',
+ 'SWAPFAIL': 'squid_handling_opts',
+ 'REFRESH': 'squid_handling_opts',
+ 'SHARED': 'squid_handling_opts',
+ 'REPLY': 'squid_handling_opts',
+ 'NEGATIVE': 'squid_object_types',
+ 'STALE': 'squid_object_types',
+ 'OFFLINE': 'squid_object_types',
+ 'INVALID': 'squid_object_types',
+ 'FAIL': 'squid_object_types',
+ 'MODIFIED': 'squid_object_types',
+ 'UNMODIFIED': 'squid_object_types',
+ 'REDIRECT': 'squid_object_types',
+ 'HIT': 'squid_cache_events',
+ 'MEM': 'squid_cache_events',
+ 'MISS': 'squid_cache_events',
+ 'DENIED': 'squid_cache_events',
+ 'NOFETCH': 'squid_cache_events',
+ 'TUNNEL': 'squid_cache_events',
+ 'ABORTED': 'squid_transport_errors',
+ 'TIMEOUT': 'squid_transport_errors'
+}
REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:.\d)?)')
+MIME_TYPES = ['application', 'audio', 'example', 'font', 'image', 'message', 'model', 'multipart', 'text', 'video']
+
class Service(LogService):
def __init__(self, configuration=None, name=None):
@@ -283,7 +365,7 @@ class Service(LogService):
log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
if log_type not in log_types:
- self.error("bad log type {log_type}. Supported types: {types}".format(log_type=log_type,
+ self.error('bad log type {log_type}. Supported types: {types}'.format(log_type=log_type,
types=log_types.keys()))
return False
@@ -317,12 +399,35 @@ class Web:
self.definitions = deepcopy(CHARTS_WEB)
self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
self.storage = dict()
- self.data = {'bytes_sent': 0, 'resp_length': 0, 'resp_time_min': 0, 'resp_time_max': 0,
- 'resp_time_avg': 0, 'resp_time_upstream_min': 0, 'resp_time_upstream_max': 0,
- 'resp_time_upstream_avg': 0, 'unique_cur_ipv4': 0, 'unique_cur_ipv6': 0, '2xx': 0,
- '5xx': 0, '3xx': 0, '4xx': 0, '1xx': 0, '0xx': 0, 'unmatched': 0, 'req_ipv4': 0,
- 'req_ipv6': 0, 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
- 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0, 'GET': 0}
+ self.data = {
+ 'bytes_sent': 0,
+ 'resp_length': 0,
+ 'resp_time_min': 0,
+ 'resp_time_max': 0,
+ 'resp_time_avg': 0,
+ 'resp_time_upstream_min': 0,
+ 'resp_time_upstream_max': 0,
+ 'resp_time_upstream_avg': 0,
+ 'unique_cur_ipv4': 0,
+ 'unique_cur_ipv6': 0,
+ '2xx': 0,
+ '5xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '1xx': 0,
+ '0xx': 0,
+ 'unmatched': 0,
+ 'req_ipv4': 0,
+ 'req_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ 'GET': 0
+ }
def __getattr__(self, item):
return getattr(self.service, item)
@@ -367,21 +472,21 @@ class Web:
histogram = self.configuration.get('histogram', None)
if isinstance(histogram, list):
self.storage['bucket_index'] = histogram[:]
- self.storage['bucket_index'].append(sys.maxint)
+ self.storage['bucket_index'].append(maxint)
self.storage['buckets'] = [0] * (len(histogram) + 1)
self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
hist_lines = self.definitions['response_time_hist']['lines']
upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
for i, le in enumerate(histogram):
- hist_key = "response_time_hist_%d" % i
- upstream_hist_key = "response_time_upstream_hist_%d" % i
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
- hist_lines.append(["response_time_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1])
- upstream_hist_lines.append(["response_time_upstream_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1])
+ hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+ upstream_hist_lines.append(['response_time_upstream_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
elif histogram is not None:
- self.error("expect histogram list, but was {0}".format(type(histogram)))
+ self.error('expect histogram list, but was {0}'.format(type(histogram)))
if not self.configuration.get('all_time', True):
self.order.remove('clients_all')
@@ -395,10 +500,11 @@ class Web:
for code in codes:
self.order.append('detailed_response_codes%s' % code)
- self.definitions['detailed_response_codes%s' % code] \
- = {'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
- 'web_log.detailed_response_codes%s' % code, 'stacked'],
- 'lines': []}
+ self.definitions['detailed_response_codes%s' % code] = {
+ 'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
+ 'web_log.detailed_response_codes%s' % code, 'stacked'],
+ 'lines': []
+ }
# Add 'requests_per_url' chart if specified in the configuration
if self.storage['url_pattern']:
@@ -499,8 +605,8 @@ class Web:
buckets = self.storage['buckets']
upstream_buckets = self.storage['upstream_buckets']
for i in range(0, len(self.storage['bucket_index'])):
- hist_key = "response_time_hist_%d" % i
- upstream_hist_key = "response_time_upstream_hist_%d" % i
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
self.data[hist_key] = buckets[i]
self.data[upstream_hist_key] = upstream_buckets[i]
@@ -596,7 +702,7 @@ class Web:
We are here only if "custom_log_format" is in logs. We need to make sure:
1. "custom_log_format" is a dict
2. "pattern" in "custom_log_format" and pattern is <str> instance
- 3. if "time_multiplier" is in "custom_log_format" it must be <int> instance
+ 3. if "time_multiplier" is in "custom_log_format" it must be <int> or <float> instance
If all parameters is ok we need to make sure:
1. Pattern search is success
@@ -623,8 +729,8 @@ class Web:
resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
- if not isinstance(resp_time_func, int):
- return find_regex_return(msg='Custom log: "time_multiplier" is not an integer')
+ if not isinstance(resp_time_func, (int, float)):
+ return find_regex_return(msg='Custom log: "time_multiplier" is not an integer or a float')
try:
regex = re.compile(pattern)
@@ -701,6 +807,23 @@ class Web:
'incremental'])
self.data[dim_id] = 0
self.data[dim_id] += 1
+ # requests per port number
+ if match_dict.get('port'):
+ if match_dict['port'] not in self.data:
+ self.charts['port'].add_dimension([match_dict['port'],
+ match_dict['port'],
+ 'incremental'])
+ self.data[match_dict['port']] = 0
+ self.data[match_dict['port']] += 1
+ # requests per vhost
+ if match_dict.get('vhost'):
+ dim_id = match_dict['vhost'].replace('.', '_')
+ if dim_id not in self.data:
+ self.charts['vhost'].add_dimension([dim_id,
+ match_dict['vhost'],
+ 'incremental'])
+ self.data[dim_id] = 0
+ self.data[dim_id] += 1
def get_data_per_response_codes_detailed(self, code):
"""
@@ -788,12 +911,29 @@ class Squid:
self.definitions = CHARTS_SQUID
self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
self.storage = dict()
- self.data = {'duration_max': 0, 'duration_avg': 0, 'duration_min': 0, 'bytes': 0,
- '0xx': 0, '1xx': 0, '2xx': 0, '3xx': 0, '4xx': 0, '5xx': 0,
- 'other': 0, 'unmatched': 0, 'unique_ipv4': 0, 'unique_ipv6': 0,
- 'unique_tot_ipv4': 0, 'unique_tot_ipv6': 0, 'successful_requests': 0,
- 'redirects': 0, 'bad_requests': 0, 'server_errors': 0, 'other_requests': 0
- }
+ self.data = {
+ 'duration_max': 0,
+ 'duration_avg': 0,
+ 'duration_min': 0,
+ 'bytes': 0,
+ '0xx': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'unmatched': 0,
+ 'unique_ipv4': 0,
+ 'unique_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0
+ }
def __getattr__(self, item):
return getattr(self.service, item)
@@ -811,30 +951,35 @@ class Squid:
r' (?P<method>[A-Z_]+)'
r' (?P<url>[^ ]+)'
r' (?P<user>[^ ]+)'
- r' (?P<hier_code>[A-Z_]+)/[\da-f.:-]+'
- r' (?P<mime_type>[^\n]+)')
+ r' (?P<hier_code>[A-Z_]+)/[\da-z.:-]+'
+ r' (?P<mime_type>[A-Za-z-]*)')
match = self.storage['regex'].search(last_line)
if not match:
self.error('Regex not matches (%s)' % self.storage['regex'].pattern)
return False
self.storage['dynamic'] = {
- 'http_code':
- {'chart': 'squid_detailed_response_codes',
+ 'http_code': {
+ 'chart': 'squid_detailed_response_codes',
'func_dim_id': None,
- 'func_dim': None},
+ 'func_dim': None
+ },
'hier_code': {
'chart': 'squid_hier_code',
'func_dim_id': None,
- 'func_dim': lambda v: v.replace('HIER_', '')},
+ 'func_dim': lambda v: v.replace('HIER_', '')
+ },
'method': {
'chart': 'squid_method',
'func_dim_id': None,
- 'func_dim': None},
+ 'func_dim': None
+ },
'mime_type': {
'chart': 'squid_mime_type',
- 'func_dim_id': lambda v: v.split('/')[0],
- 'func_dim': None}}
+ 'func_dim_id': lambda v: str.lower(v) if str.lower(v) in MIME_TYPES else 'unknown',
+ 'func_dim': None
+ }
+ }
if not self.configuration.get('all_time', True):
self.order.remove('squid_clients_all')
return True
@@ -951,6 +1096,7 @@ def get_timings(timings, time):
timings['summary'] += time
timings['count'] += 1
+
def get_hist(index, buckets, time):
"""
:param index: histogram index (Ex. [10, 50, 100, 150, ...])
@@ -964,6 +1110,7 @@ def get_hist(index, buckets, time):
else:
break
+
def address_not_in_pool(pool, address, pool_size):
"""
:param pool: list of ip addresses
diff --git a/conf.d/python.d/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf
index c185f8d85..a67957aef 100644
--- a/conf.d/python.d/web_log.conf
+++ b/collectors/python.d.plugin/web_log/web_log.conf
@@ -84,7 +84,7 @@
# stub_status: 'stub_status' # name(dimension): REGEX to match
# custom_log_format: # define a custom log format
# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
-# time_multiplier: 1000000 # type <int> - convert time to microseconds
+# time_multiplier: 1000000 # type <int>/<float> - convert time to microseconds
# histogram: [1,3,10,30,100, ...] # type list of int - Cumulative histogram of response time in milli seconds
# ----------------------------------------------------------------------
@@ -109,6 +109,17 @@
# CustomLog "/var/log/apache2/access.log" netdata
# ----------------------------------------------------------------------
+# VHOST AND PORT
+# if you want to graph the requests/sec per virtual host and per port (to compare the number of requests over http vs https)
+
+# in Apache (%v gives the hostname, %p the port number):
+# LogFormat "%v %p %h %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
+#
+# and in this file, in the apache_vhosts_log section, add:
+# custom_log_format:
+# pattern: '(?P<vhost>[a-zA-Z\d._-]+) (?P<port>\d+) (?P<address>[\da-f.:]+) \[.*\] "(?P<method>[A-Z]+)[^"]*" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+)'
+
+# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them per web server will run (when they have the same name)