path: root/collectors/python.d.plugin

Diffstat
-rw-r--r-- collectors/python.d.plugin/.keep | 0
-rw-r--r-- collectors/python.d.plugin/Makefile.am | 246
-rw-r--r-- collectors/python.d.plugin/README.md | 225
-rw-r--r-- collectors/python.d.plugin/adaptec_raid/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/adaptec_raid/README.md | 48
-rw-r--r-- collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py | 247
-rw-r--r-- collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf | 53
-rw-r--r-- collectors/python.d.plugin/apache/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/apache/README.md | 59
-rw-r--r-- collectors/python.d.plugin/apache/apache.chart.py | 160
-rw-r--r-- collectors/python.d.plugin/apache/apache.conf | 85
-rw-r--r-- collectors/python.d.plugin/beanstalk/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/beanstalk/README.md | 105
-rw-r--r-- collectors/python.d.plugin/beanstalk/beanstalk.chart.py | 252
-rw-r--r-- collectors/python.d.plugin/beanstalk/beanstalk.conf | 78
-rw-r--r-- collectors/python.d.plugin/bind_rndc/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/bind_rndc/README.md | 62
-rw-r--r-- collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py | 254
-rw-r--r-- collectors/python.d.plugin/bind_rndc/bind_rndc.conf | 110
-rw-r--r-- collectors/python.d.plugin/boinc/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/boinc/README.md | 30
-rw-r--r-- collectors/python.d.plugin/boinc/boinc.chart.py | 170
-rw-r--r-- collectors/python.d.plugin/boinc/boinc.conf | 66
-rw-r--r-- collectors/python.d.plugin/ceph/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/ceph/README.md | 34
-rw-r--r-- collectors/python.d.plugin/ceph/ceph.chart.py | 344
-rw-r--r-- collectors/python.d.plugin/ceph/ceph.conf | 73
-rw-r--r-- collectors/python.d.plugin/chrony/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/chrony/README.md | 33
-rw-r--r-- collectors/python.d.plugin/chrony/chrony.chart.py | 118
-rw-r--r-- collectors/python.d.plugin/chrony/chrony.conf | 77
-rw-r--r-- collectors/python.d.plugin/couchdb/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/couchdb/README.md | 37
-rw-r--r-- collectors/python.d.plugin/couchdb/couchdb.chart.py | 400
-rw-r--r-- collectors/python.d.plugin/couchdb/couchdb.conf | 89
-rw-r--r-- collectors/python.d.plugin/cpufreq/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/cpufreq/README.md | 37
-rw-r--r-- collectors/python.d.plugin/cpufreq/cpufreq.chart.py | 115
-rw-r--r-- collectors/python.d.plugin/cpufreq/cpufreq.conf | 41
-rw-r--r-- collectors/python.d.plugin/cpuidle/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/cpuidle/README.md | 13
-rw-r--r-- collectors/python.d.plugin/cpuidle/cpuidle.chart.py | 148
-rw-r--r-- collectors/python.d.plugin/cpuidle/cpuidle.conf | 38
-rw-r--r-- collectors/python.d.plugin/dns_query_time/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/dns_query_time/README.md | 12
-rw-r--r-- collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py | 152
-rw-r--r-- collectors/python.d.plugin/dns_query_time/dns_query_time.conf | 69
-rw-r--r-- collectors/python.d.plugin/dnsdist/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/dnsdist/README.md | 56
-rw-r--r-- collectors/python.d.plugin/dnsdist/dnsdist.chart.py | 133
-rw-r--r-- collectors/python.d.plugin/dnsdist/dnsdist.conf | 83
-rw-r--r-- collectors/python.d.plugin/dockerd/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/dockerd/README.md | 28
-rw-r--r-- collectors/python.d.plugin/dockerd/dockerd.chart.py | 87
-rw-r--r-- collectors/python.d.plugin/dockerd/dockerd.conf | 77
-rw-r--r-- collectors/python.d.plugin/dovecot/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/dovecot/README.md | 79
-rw-r--r-- collectors/python.d.plugin/dovecot/dovecot.chart.py | 144
-rw-r--r-- collectors/python.d.plugin/dovecot/dovecot.conf | 98
-rw-r--r-- collectors/python.d.plugin/elasticsearch/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/elasticsearch/README.md | 62
-rw-r--r-- collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py | 667
-rw-r--r-- collectors/python.d.plugin/elasticsearch/elasticsearch.conf | 81
-rw-r--r-- collectors/python.d.plugin/example/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/example/README.md | 5
-rw-r--r-- collectors/python.d.plugin/example/example.chart.py | 49
-rw-r--r-- collectors/python.d.plugin/example/example.conf | 68
-rw-r--r-- collectors/python.d.plugin/exim/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/exim/README.md | 15
-rw-r--r-- collectors/python.d.plugin/exim/exim.chart.py | 40
-rw-r--r-- collectors/python.d.plugin/exim/exim.conf | 91
-rw-r--r-- collectors/python.d.plugin/fail2ban/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/fail2ban/README.md | 25
-rw-r--r-- collectors/python.d.plugin/fail2ban/fail2ban.chart.py | 206
-rw-r--r-- collectors/python.d.plugin/fail2ban/fail2ban.conf | 68
-rw-r--r-- collectors/python.d.plugin/freeradius/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/freeradius/README.md | 72
-rw-r--r-- collectors/python.d.plugin/freeradius/freeradius.chart.py | 177
-rw-r--r-- collectors/python.d.plugin/freeradius/freeradius.conf | 80
-rw-r--r-- collectors/python.d.plugin/go_expvar/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/go_expvar/README.md | 277
-rw-r--r-- collectors/python.d.plugin/go_expvar/go_expvar.chart.py | 254
-rw-r--r-- collectors/python.d.plugin/go_expvar/go_expvar.conf | 108
-rw-r--r-- collectors/python.d.plugin/haproxy/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/haproxy/README.md | 51
-rw-r--r-- collectors/python.d.plugin/haproxy/haproxy.chart.py | 363
-rw-r--r-- collectors/python.d.plugin/haproxy/haproxy.conf | 83
-rw-r--r-- collectors/python.d.plugin/hddtemp/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/hddtemp/README.md | 24
-rw-r--r-- collectors/python.d.plugin/hddtemp/hddtemp.chart.py | 101
-rw-r--r-- collectors/python.d.plugin/hddtemp/hddtemp.conf | 95
-rw-r--r-- collectors/python.d.plugin/httpcheck/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/httpcheck/README.md | 43
-rw-r--r-- collectors/python.d.plugin/httpcheck/httpcheck.chart.py | 124
-rw-r--r-- collectors/python.d.plugin/httpcheck/httpcheck.conf | 104
-rw-r--r-- collectors/python.d.plugin/icecast/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/icecast/README.md | 28
-rw-r--r-- collectors/python.d.plugin/icecast/icecast.chart.py | 95
-rw-r--r-- collectors/python.d.plugin/icecast/icecast.conf | 81
-rw-r--r-- collectors/python.d.plugin/ipfs/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/ipfs/README.md | 27
-rw-r--r-- collectors/python.d.plugin/ipfs/ipfs.chart.py | 132
-rw-r--r-- collectors/python.d.plugin/ipfs/ipfs.conf | 77
-rw-r--r-- collectors/python.d.plugin/isc_dhcpd/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/isc_dhcpd/README.md | 36
-rw-r--r-- collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py | 207
-rw-r--r-- collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf | 79
-rw-r--r-- collectors/python.d.plugin/linux_power_supply/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/linux_power_supply/README.md | 74
-rw-r--r-- collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py | 160
-rw-r--r-- collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf | 79
-rw-r--r-- collectors/python.d.plugin/litespeed/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/litespeed/README.md | 49
-rw-r--r-- collectors/python.d.plugin/litespeed/litespeed.chart.py | 190
-rw-r--r-- collectors/python.d.plugin/litespeed/litespeed.conf | 72
-rw-r--r-- collectors/python.d.plugin/logind/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/logind/README.md | 56
-rw-r--r-- collectors/python.d.plugin/logind/logind.chart.py | 85
-rw-r--r-- collectors/python.d.plugin/logind/logind.conf | 60
-rw-r--r-- collectors/python.d.plugin/mdstat/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/mdstat/README.md | 33
-rw-r--r-- collectors/python.d.plugin/mdstat/mdstat.chart.py | 205
-rw-r--r-- collectors/python.d.plugin/mdstat/mdstat.conf | 30
-rw-r--r-- collectors/python.d.plugin/megacli/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/megacli/README.md | 50
-rw-r--r-- collectors/python.d.plugin/megacli/megacli.chart.py | 279
-rw-r--r-- collectors/python.d.plugin/megacli/megacli.conf | 60
-rw-r--r-- collectors/python.d.plugin/memcached/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/memcached/README.md | 71
-rw-r--r-- collectors/python.d.plugin/memcached/memcached.chart.py | 198
-rw-r--r-- collectors/python.d.plugin/memcached/memcached.conf | 90
-rw-r--r-- collectors/python.d.plugin/mongodb/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/mongodb/README.md | 170
-rw-r--r-- collectors/python.d.plugin/mongodb/mongodb.chart.py | 727
-rw-r--r-- collectors/python.d.plugin/mongodb/mongodb.conf | 82
-rw-r--r-- collectors/python.d.plugin/monit/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/monit/README.md | 35
-rw-r--r-- collectors/python.d.plugin/monit/monit.chart.py | 178
-rw-r--r-- collectors/python.d.plugin/monit/monit.conf | 86
-rw-r--r-- collectors/python.d.plugin/mysql/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/mysql/README.md | 90
-rw-r--r-- collectors/python.d.plugin/mysql/mysql.chart.py | 621
-rw-r--r-- collectors/python.d.plugin/mysql/mysql.conf | 285
-rw-r--r-- collectors/python.d.plugin/nginx/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/nginx/README.md | 46
-rw-r--r-- collectors/python.d.plugin/nginx/nginx.chart.py | 72
-rw-r--r-- collectors/python.d.plugin/nginx/nginx.conf | 107
-rw-r--r-- collectors/python.d.plugin/nginx_plus/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/nginx_plus/README.md | 127
-rw-r--r-- collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py | 488
-rw-r--r-- collectors/python.d.plugin/nginx_plus/nginx_plus.conf | 85
-rw-r--r-- collectors/python.d.plugin/nsd/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/nsd/README.md | 56
-rw-r--r-- collectors/python.d.plugin/nsd/nsd.chart.py | 106
-rw-r--r-- collectors/python.d.plugin/nsd/nsd.conf | 91
-rw-r--r-- collectors/python.d.plugin/ntpd/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/ntpd/README.md | 73
-rw-r--r-- collectors/python.d.plugin/ntpd/ntpd.chart.py | 386
-rw-r--r-- collectors/python.d.plugin/ntpd/ntpd.conf | 89
-rw-r--r-- collectors/python.d.plugin/nvidia_smi/Makefile.inc | 12
-rw-r--r-- collectors/python.d.plugin/nvidia_smi/README.md | 40
-rw-r--r-- collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py | 374
-rw-r--r-- collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf | 66
-rw-r--r-- collectors/python.d.plugin/openldap/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/openldap/README.md | 59
-rw-r--r-- collectors/python.d.plugin/openldap/openldap.chart.py | 200
-rw-r--r-- collectors/python.d.plugin/openldap/openldap.conf | 72
-rw-r--r-- collectors/python.d.plugin/ovpn_status_log/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/ovpn_status_log/README.md | 34
-rw-r--r-- collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py | 137
-rw-r--r-- collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf | 97
-rw-r--r-- collectors/python.d.plugin/phpfpm/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/phpfpm/README.md | 41
-rw-r--r-- collectors/python.d.plugin/phpfpm/phpfpm.chart.py | 172
-rw-r--r-- collectors/python.d.plugin/phpfpm/phpfpm.conf | 88
-rw-r--r-- collectors/python.d.plugin/portcheck/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/portcheck/README.md | 37
-rw-r--r-- collectors/python.d.plugin/portcheck/portcheck.chart.py | 158
-rw-r--r-- collectors/python.d.plugin/portcheck/portcheck.conf | 74
-rw-r--r-- collectors/python.d.plugin/postfix/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/postfix/README.md | 17
-rw-r--r-- collectors/python.d.plugin/postfix/postfix.chart.py | 52
-rw-r--r-- collectors/python.d.plugin/postfix/postfix.conf | 72
-rw-r--r-- collectors/python.d.plugin/postgres/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/postgres/README.md | 70
-rw-r--r-- collectors/python.d.plugin/postgres/postgres.chart.py | 1092
-rw-r--r-- collectors/python.d.plugin/postgres/postgres.conf | 124
-rw-r--r-- collectors/python.d.plugin/powerdns/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/powerdns/README.md | 79
-rw-r--r-- collectors/python.d.plugin/powerdns/powerdns.chart.py | 153
-rw-r--r-- collectors/python.d.plugin/powerdns/powerdns.conf | 76
-rw-r--r-- collectors/python.d.plugin/proxysql/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/proxysql/README.md | 64
-rw-r--r-- collectors/python.d.plugin/proxysql/proxysql.chart.py | 351
-rw-r--r-- collectors/python.d.plugin/proxysql/proxysql.conf | 116
-rw-r--r-- collectors/python.d.plugin/puppet/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/puppet/README.md | 47
-rw-r--r-- collectors/python.d.plugin/puppet/puppet.chart.py | 123
-rw-r--r-- collectors/python.d.plugin/puppet/puppet.conf | 94
-rw-r--r-- collectors/python.d.plugin/python.d.conf | 106
-rw-r--r-- collectors/python.d.plugin/python.d.plugin.in | 427
-rw-r--r-- collectors/python.d.plugin/python_modules/__init__.py | 0
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py | 89
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py | 80
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py | 161
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py | 252
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py | 311
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py | 153
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py | 0
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/__init__.py | 0
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/charts.py | 394
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/collection.py | 145
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/loaders.py | 83
-rw-r--r-- collectors/python.d.plugin/python_modules/bases/loggers.py | 206
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/__init__.py | 316
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/composer.py | 140
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/constructor.py | 676
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py | 86
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/dumper.py | 63
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/emitter.py | 1141
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/error.py | 76
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/events.py | 87
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/loader.py | 41
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/nodes.py | 50
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/parser.py | 590
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/reader.py | 191
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/representer.py | 485
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/resolver.py | 225
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/scanner.py | 1458
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/serializer.py | 112
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml2/tokens.py | 105
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/__init__.py | 313
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/composer.py | 140
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/constructor.py | 687
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py | 86
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/dumper.py | 63
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/emitter.py | 1138
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/error.py | 76
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/events.py | 87
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/loader.py | 41
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/nodes.py | 50
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/parser.py | 590
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/reader.py | 193
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/representer.py | 375
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/resolver.py | 225
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/scanner.py | 1449
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/serializer.py | 112
-rw-r--r-- collectors/python.d.plugin/python_modules/pyyaml3/tokens.py | 105
-rw-r--r-- collectors/python.d.plugin/python_modules/third_party/__init__.py | 0
-rw-r--r-- collectors/python.d.plugin/python_modules/third_party/boinc_client.py | 515
-rw-r--r-- collectors/python.d.plugin/python_modules/third_party/lm_sensors.py | 327
-rw-r--r-- collectors/python.d.plugin/python_modules/third_party/mcrcon.py | 74
-rw-r--r-- collectors/python.d.plugin/python_modules/third_party/monotonic.py | 171
-rw-r--r-- collectors/python.d.plugin/python_modules/third_party/ordereddict.py | 110
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/__init__.py | 98
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/_collections.py | 315
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/connection.py | 374
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/connectionpool.py | 900
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py | 0
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py | 0
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py | 591
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py | 344
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py | 297
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py | 113
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py | 458
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py | 808
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py | 189
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/exceptions.py | 247
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/fields.py | 179
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/filepost.py | 95
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py | 5
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py | 0
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py | 54
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py | 260
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/packages/six.py | 852
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py | 20
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py | 156
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/poolmanager.py | 441
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/request.py | 149
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/response.py | 623
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/__init__.py | 55
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/connection.py | 131
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/request.py | 119
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/response.py | 82
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/retry.py | 402
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/selectors.py | 582
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py | 338
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/timeout.py | 243
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/url.py | 231
-rw-r--r-- collectors/python.d.plugin/python_modules/urllib3/util/wait.py | 41
-rw-r--r-- collectors/python.d.plugin/rabbitmq/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/rabbitmq/README.md | 58
-rw-r--r-- collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py | 185
-rw-r--r-- collectors/python.d.plugin/rabbitmq/rabbitmq.conf | 80
-rw-r--r-- collectors/python.d.plugin/redis/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/redis/README.md | 44
-rw-r--r-- collectors/python.d.plugin/redis/redis.chart.py | 258
-rw-r--r-- collectors/python.d.plugin/redis/redis.conf | 110
-rw-r--r-- collectors/python.d.plugin/rethinkdbs/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/rethinkdbs/README.md | 36
-rw-r--r-- collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py | 233
-rw-r--r-- collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf | 76
-rw-r--r-- collectors/python.d.plugin/retroshare/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/retroshare/README.md | 3
-rw-r--r-- collectors/python.d.plugin/retroshare/retroshare.chart.py | 79
-rw-r--r-- collectors/python.d.plugin/retroshare/retroshare.conf | 72
-rw-r--r-- collectors/python.d.plugin/samba/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/samba/README.md | 69
-rw-r--r-- collectors/python.d.plugin/samba/samba.chart.py | 135
-rw-r--r-- collectors/python.d.plugin/samba/samba.conf | 60
-rw-r--r-- collectors/python.d.plugin/sensors/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/sensors/README.md | 19
-rw-r--r-- collectors/python.d.plugin/sensors/sensors.chart.py | 165
-rw-r--r-- collectors/python.d.plugin/sensors/sensors.conf | 61
-rw-r--r-- collectors/python.d.plugin/smartd_log/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/smartd_log/README.md | 103
-rw-r--r-- collectors/python.d.plugin/smartd_log/smartd_log.chart.py | 728
-rw-r--r-- collectors/python.d.plugin/smartd_log/smartd_log.conf | 67
-rw-r--r-- collectors/python.d.plugin/spigotmc/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/spigotmc/README.md | 24
-rw-r--r-- collectors/python.d.plugin/spigotmc/spigotmc.chart.py | 123
-rw-r--r-- collectors/python.d.plugin/spigotmc/spigotmc.conf | 66
-rw-r--r-- collectors/python.d.plugin/springboot/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/springboot/README.md | 124
-rw-r--r-- collectors/python.d.plugin/springboot/springboot.chart.py | 160
-rw-r--r-- collectors/python.d.plugin/springboot/springboot.conf | 118
-rw-r--r-- collectors/python.d.plugin/squid/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/squid/README.md | 40
-rw-r--r-- collectors/python.d.plugin/squid/squid.chart.py | 124
-rw-r--r-- collectors/python.d.plugin/squid/squid.conf | 167
-rw-r--r-- collectors/python.d.plugin/tomcat/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/tomcat/README.md | 35
-rw-r--r-- collectors/python.d.plugin/tomcat/tomcat.chart.py | 168
-rw-r--r-- collectors/python.d.plugin/tomcat/tomcat.conf | 89
-rw-r--r-- collectors/python.d.plugin/tor/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/tor/README.md | 48
-rw-r--r-- collectors/python.d.plugin/tor/tor.chart.py | 106
-rw-r--r-- collectors/python.d.plugin/tor/tor.conf | 77
-rw-r--r-- collectors/python.d.plugin/traefik/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/traefik/README.md | 55
-rw-r--r-- collectors/python.d.plugin/traefik/traefik.chart.py | 200
-rw-r--r-- collectors/python.d.plugin/traefik/traefik.conf | 77
-rw-r--r-- collectors/python.d.plugin/unbound/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/unbound/README.md | 78
-rw-r--r-- collectors/python.d.plugin/unbound/unbound.chart.py | 279
-rw-r--r-- collectors/python.d.plugin/unbound/unbound.conf | 85
-rw-r--r-- collectors/python.d.plugin/uwsgi/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/uwsgi/README.md | 39
-rw-r--r-- collectors/python.d.plugin/uwsgi/uwsgi.chart.py | 177
-rw-r--r-- collectors/python.d.plugin/uwsgi/uwsgi.conf | 92
-rw-r--r-- collectors/python.d.plugin/varnish/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/varnish/README.md | 77
-rw-r--r-- collectors/python.d.plugin/varnish/varnish.chart.py | 262
-rw-r--r-- collectors/python.d.plugin/varnish/varnish.conf | 66
-rw-r--r-- collectors/python.d.plugin/w1sensor/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/w1sensor/README.md | 15
-rw-r--r-- collectors/python.d.plugin/w1sensor/w1sensor.chart.py | 95
-rw-r--r-- collectors/python.d.plugin/w1sensor/w1sensor.conf | 72
-rw-r--r-- collectors/python.d.plugin/web_log/Makefile.inc | 13
-rw-r--r-- collectors/python.d.plugin/web_log/README.md | 203
-rw-r--r-- collectors/python.d.plugin/web_log/web_log.chart.py | 1196
-rw-r--r-- collectors/python.d.plugin/web_log/web_log.conf | 204
362 files changed, 52200 insertions, 0 deletions
diff --git a/collectors/python.d.plugin/.keep b/collectors/python.d.plugin/.keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/.keep
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
new file mode 100644
index 0000000..3599d9c
--- /dev/null
+++ b/collectors/python.d.plugin/Makefile.am
@@ -0,0 +1,246 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ python.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ python.d.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_python_SCRIPTS = \
+ $(NULL)
+
+dist_python_DATA = \
+ $(NULL)
+
+userpythonconfigdir=$(configdir)/python.d
+dist_userpythonconfig_DATA = \
+ .keep \
+ $(NULL)
+
+pythonconfigdir=$(libconfigdir)/python.d
+dist_pythonconfig_DATA = \
+ $(NULL)
+
+include adaptec_raid/Makefile.inc
+include apache/Makefile.inc
+include beanstalk/Makefile.inc
+include bind_rndc/Makefile.inc
+include boinc/Makefile.inc
+include ceph/Makefile.inc
+include chrony/Makefile.inc
+include couchdb/Makefile.inc
+include cpufreq/Makefile.inc
+include cpuidle/Makefile.inc
+include dnsdist/Makefile.inc
+include dns_query_time/Makefile.inc
+include dockerd/Makefile.inc
+include dovecot/Makefile.inc
+include elasticsearch/Makefile.inc
+include example/Makefile.inc
+include exim/Makefile.inc
+include fail2ban/Makefile.inc
+include freeradius/Makefile.inc
+include go_expvar/Makefile.inc
+include haproxy/Makefile.inc
+include hddtemp/Makefile.inc
+include httpcheck/Makefile.inc
+include icecast/Makefile.inc
+include ipfs/Makefile.inc
+include isc_dhcpd/Makefile.inc
+include linux_power_supply/Makefile.inc
+include litespeed/Makefile.inc
+include logind/Makefile.inc
+include mdstat/Makefile.inc
+include megacli/Makefile.inc
+include memcached/Makefile.inc
+include mongodb/Makefile.inc
+include monit/Makefile.inc
+include mysql/Makefile.inc
+include nginx/Makefile.inc
+include nginx_plus/Makefile.inc
+include nvidia_smi/Makefile.inc
+include nsd/Makefile.inc
+include ntpd/Makefile.inc
+include ovpn_status_log/Makefile.inc
+include openldap/Makefile.inc
+include phpfpm/Makefile.inc
+include portcheck/Makefile.inc
+include postfix/Makefile.inc
+include postgres/Makefile.inc
+include powerdns/Makefile.inc
+include proxysql/Makefile.inc
+include puppet/Makefile.inc
+include rabbitmq/Makefile.inc
+include redis/Makefile.inc
+include rethinkdbs/Makefile.inc
+include retroshare/Makefile.inc
+include samba/Makefile.inc
+include sensors/Makefile.inc
+include smartd_log/Makefile.inc
+include spigotmc/Makefile.inc
+include springboot/Makefile.inc
+include squid/Makefile.inc
+include tomcat/Makefile.inc
+include tor/Makefile.inc
+include traefik/Makefile.inc
+include unbound/Makefile.inc
+include uwsgi/Makefile.inc
+include varnish/Makefile.inc
+include w1sensor/Makefile.inc
+include web_log/Makefile.inc
+
+pythonmodulesdir=$(pythondir)/python_modules
+dist_pythonmodules_DATA = \
+ python_modules/__init__.py \
+ $(NULL)
+
+basesdir=$(pythonmodulesdir)/bases
+dist_bases_DATA = \
+ python_modules/bases/__init__.py \
+ python_modules/bases/charts.py \
+ python_modules/bases/collection.py \
+ python_modules/bases/loaders.py \
+ python_modules/bases/loggers.py \
+ $(NULL)
+
+bases_framework_servicesdir=$(basesdir)/FrameworkServices
+dist_bases_framework_services_DATA = \
+ python_modules/bases/FrameworkServices/__init__.py \
+ python_modules/bases/FrameworkServices/ExecutableService.py \
+ python_modules/bases/FrameworkServices/LogService.py \
+ python_modules/bases/FrameworkServices/MySQLService.py \
+ python_modules/bases/FrameworkServices/SimpleService.py \
+ python_modules/bases/FrameworkServices/SocketService.py \
+ python_modules/bases/FrameworkServices/UrlService.py \
+ $(NULL)
+
+third_partydir=$(pythonmodulesdir)/third_party
+dist_third_party_DATA = \
+ python_modules/third_party/__init__.py \
+ python_modules/third_party/ordereddict.py \
+ python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
+ $(NULL)
+
+pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
+dist_pythonyaml2_DATA = \
+ python_modules/pyyaml2/__init__.py \
+ python_modules/pyyaml2/composer.py \
+ python_modules/pyyaml2/constructor.py \
+ python_modules/pyyaml2/cyaml.py \
+ python_modules/pyyaml2/dumper.py \
+ python_modules/pyyaml2/emitter.py \
+ python_modules/pyyaml2/error.py \
+ python_modules/pyyaml2/events.py \
+ python_modules/pyyaml2/loader.py \
+ python_modules/pyyaml2/nodes.py \
+ python_modules/pyyaml2/parser.py \
+ python_modules/pyyaml2/reader.py \
+ python_modules/pyyaml2/representer.py \
+ python_modules/pyyaml2/resolver.py \
+ python_modules/pyyaml2/scanner.py \
+ python_modules/pyyaml2/serializer.py \
+ python_modules/pyyaml2/tokens.py \
+ $(NULL)
+
+pythonyaml3dir=$(pythonmodulesdir)/pyyaml3
+dist_pythonyaml3_DATA = \
+ python_modules/pyyaml3/__init__.py \
+ python_modules/pyyaml3/composer.py \
+ python_modules/pyyaml3/constructor.py \
+ python_modules/pyyaml3/cyaml.py \
+ python_modules/pyyaml3/dumper.py \
+ python_modules/pyyaml3/emitter.py \
+ python_modules/pyyaml3/error.py \
+ python_modules/pyyaml3/events.py \
+ python_modules/pyyaml3/loader.py \
+ python_modules/pyyaml3/nodes.py \
+ python_modules/pyyaml3/parser.py \
+ python_modules/pyyaml3/reader.py \
+ python_modules/pyyaml3/representer.py \
+ python_modules/pyyaml3/resolver.py \
+ python_modules/pyyaml3/scanner.py \
+ python_modules/pyyaml3/serializer.py \
+ python_modules/pyyaml3/tokens.py \
+ $(NULL)
+
+python_urllib3dir=$(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir=$(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir=$(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir=$(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
new file mode 100644
index 0000000..8955197
--- /dev/null
+++ b/collectors/python.d.plugin/README.md
@@ -0,0 +1,225 @@
+# python.d.plugin
+
+`python.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. Supports any number of data collection **modules**
+5. Allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
+
+## Disclaimer
+
+Every module should be compatible with Python 2 and Python 3.
+All third-party libraries should be installed system-wide or in the `python_modules` directory.
+Module configurations are written in YAML and **PyYAML is required**.
+
+Every configuration file must have one of two formats:
+
+- Configuration for only one job:
+
+```yaml
+update_every : 2 # update frequency
+priority : 20000 # where it is shown on dashboard
+
+other_var1 : bla # variables passed to module
+other_var2 : alb
+```
+
+- Configuration for many jobs (e.g. `mysql`):
+
+```yaml
+# module defaults:
+update_every : 2
+priority : 20000
+
+local: # job name
+ update_every : 5 # job update frequency
+ other_var1 : some_val # module specific variable
+
+other_job:
+ priority : 5 # job position on dashboard
+ other_var2 : val # module specific variable
+```
+
+`update_every` and `priority` are always optional.
+
+## How to debug a python module
+
+```
+# become user netdata
+sudo su -s /bin/bash netdata
+```
+Depending on where Netdata was installed, execute one of the following commands to trace the execution of a Python module:
+
+```
+# execute the plugin in debug mode, for a specific module
+/opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+```
+Where `<module>` is the directory name under https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin
+
+## How to write a new module
+
+Writing a new Python module is simple. You just need to remember to include 5 major things:
+- **ORDER** global list
+- **CHARTS** global dictionary
+- **Service** class
+- **_get_data** method
+- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
+
+If you plan to submit the module in a PR, go through the [PR checklist for new modules](#pull-request-checklist-for-python-plugins) beforehand to make sure you have updated all the files you need to.
+
+For a quick start, you can look at the [example plugin](example/example.chart.py).
+
+### Global variables `ORDER` and `CHARTS`
+
+The `ORDER` list should contain the order of chart ids. Example:
+```py
+ORDER = ['first_chart', 'second_chart', 'third_chart']
+```
+
+The `CHARTS` dictionary is a little bit trickier. It should contain the chart definitions in the following format:
+```py
+CHARTS = {
+    id: {
+        'options': [name, title, units, family, context, charttype],
+        'lines': [
+            [unique_dimension_name, name, algorithm, multiplier, divisor]
+        ]
+    }
+}
+```
+
+All names are better explained in the [External Plugins](../) section.
+Parameters like `priority` and `update_every` are handled by `python.d.plugin`.
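+
+For illustration, a filled-in definition for a single chart with two dimensions might look like this (a minimal sketch; the chart and dimension names here are hypothetical):
+```py
+CHARTS = {
+    'connections': {
+        'options': [None, 'Connections', 'connections', 'network', 'example.connections', 'line'],
+        'lines': [
+            ['active', None, 'absolute'],
+            ['idle', 'idle connections', 'absolute'],
+        ]
+    }
+}
+```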
+
+### `Service` class
+
+Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes:
+
+- `SimpleService`
+- `UrlService`
+- `SocketService`
+- `LogService`
+- `ExecutableService`
+
+It also needs to invoke the parent class constructor in a specific way, and assign the global variables to class variables.
+
+Simple example:
+```py
+from bases.FrameworkServices.UrlService import UrlService
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+```
+
+### `_get_data` collector/parser
+
+This method should grab raw data from `_get_raw_data`, parse it, and return a dictionary whose keys are unique dimension names, or `None` if no data is collected.
+
+Example:
+```py
+def _get_data(self):
+ try:
+ raw = self._get_raw_data().split(" ")
+ return {'active': int(raw[2])}
+ except (ValueError, AttributeError):
+ return None
+```
+
+## More about framework classes
+
+Every framework class has some user-configurable variables which are specific to this particular class. Those variables should have default values initialized in the child class constructor.
+
+If a module needs additional user-configurable variables, they can be accessed from the `self.configuration` dictionary and assigned in the constructor or a custom `check` method. Example:
+```py
+def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ try:
+ self.baseurl = str(self.configuration['baseurl'])
+ except (KeyError, TypeError):
+ self.baseurl = "http://localhost:5001"
+```
+
+Classes implement `_get_raw_data` which should be used to grab raw data. This method usually returns a list of strings.
+
+### `SimpleService`
+
+_This is a last-resort class: if a new module cannot be written using any other framework class, this one can be used._
+
+_Example: `mysql`, `sensors`_
+
+It is the lowest-level class, which implements most of the module logic, like:
+- threading
+- handling run times
+- chart formatting
+- logging
+- chart creation and updating
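+
+A minimal sketch of a module built directly on `SimpleService` follows (the module name, chart, and `/proc` source are illustrative assumptions, not an existing module):
+```py
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = ['load']
+CHARTS = {
+    'load': {
+        'options': [None, 'Load Average', 'load', 'load', 'loadavg.load', 'line'],
+        'lines': [
+            ['load1', '1 min', 'absolute', 1, 100],
+        ]
+    }
+}
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def get_data(self):
+        # Read the 1-minute load average, stored as an integer scaled by 100
+        # (the chart divisor of 100 scales it back for display).
+        try:
+            with open('/proc/loadavg') as f:
+                return {'load1': int(float(f.read().split()[0]) * 100)}
+        except (OSError, IOError, ValueError):
+            return None
+```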
+
+### `LogService`
+
+_Examples: `apache_cache`, `nginx_log`_
+
+_Variable from config file_: `log_path`.
+
+An object created from this class reads new lines from the file specified by the `log_path` variable. It checks that the file exists and is readable. `_get_raw_data` returns a list of strings, where each string is one line from the file specified in `log_path`.
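+
+A minimal sketch of a `LogService`-based `_get_data` (the log format and dimension name are illustrative assumptions):
+```py
+def _get_data(self):
+    raw = self._get_raw_data()  # only the lines appended since the previous run
+    if raw is None:
+        return None
+    # Count how many of the new lines contain the word ERROR.
+    return {'errors': sum(1 for line in raw if 'ERROR' in line)}
+```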
+
+### `ExecutableService`
+
+_Examples: `exim`, `postfix`_
+
+_Variable from config file_: `command`.
+
+This class allows a shell command to be executed in a secure way. It checks the `command` variable for invalid characters and won't proceed if it contains any of:
+- '&'
+- '|'
+- ';'
+- '>'
+- '<'
+
+For additional security it uses Python's `subprocess.Popen` (without the `shell=True` option) to execute the command. The command can be specified with an absolute or relative name. When using a relative name, it will try to find `command` in the `PATH` environment variable, as well as in `/sbin` and `/usr/sbin`.
+
+`_get_raw_data` returns the list of decoded lines returned by `command`.
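+
+A minimal sketch of an `ExecutableService` constructor and parser (the command and the parsing are illustrative assumptions; see the `exim` or `postfix` modules for real examples):
+```py
+def __init__(self, configuration=None, name=None):
+    ExecutableService.__init__(self, configuration=configuration, name=name)
+    self.order = ORDER
+    self.definitions = CHARTS
+    self.command = 'uptime'
+
+def _get_data(self):
+    raw = self._get_raw_data()
+    if not raw:
+        return None
+    # 'uptime' prints one line ending in three load averages; take the first.
+    try:
+        return {'load1': int(float(raw[0].split()[-3].rstrip(',')) * 100)}
+    except (IndexError, ValueError):
+        return None
+```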
+
+### `UrlService`
+
+_Examples: `apache`, `nginx`, `tomcat`_
+
+_Variables from config file_: `url`, `user`, `pass`.
+
+If data is grabbed by accessing a service over HTTP, this class can be used. It can handle HTTP Basic Auth when `user` and `pass` credentials are specified.
+
+`_get_raw_data` returns a list of UTF-8 decoded strings (lines).
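+
+A job for a `UrlService`-based module is typically configured like this (a hypothetical example; the URL and credentials are placeholders):
+```yaml
+local:
+  url  : 'http://localhost:8080/status'
+  user : 'admin'
+  pass : 'password'
+```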
+
+### `SocketService`
+
+_Examples: `dovecot`, `redis`_
+
+_Variables from config file_: `unix_socket`, `host`, `port`, `request`.
+
+An object of this class will try to execute `request` using either a `unix_socket` or a TCP/IP socket built from `host` and `port`. It can access unix sockets with the SOCK_STREAM or SOCK_DGRAM protocol, and TCP/IP sockets (IPv4 and IPv6) with SOCK_STREAM.
+
+Sockets are accessed in non-blocking mode with a 15-second timeout.
+
+After every execution of `_get_raw_data` the socket is closed. To prevent this, the module needs to set the `_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
+
+`_check_raw_data` should take the raw data and return `True` if all data has been received, otherwise `False`. It should also do this in a fast and efficient way.
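+
+A minimal sketch of a custom `_check_raw_data` (assuming, hypothetically, that the service terminates every complete response with a newline):
+```py
+def _check_raw_data(self, data):
+    # All data has been received once the response ends with a newline.
+    return data.endswith('\n')
+```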
+
+## Pull Request Checklist for Python Plugins
+
+This is a generic checklist for submitting a new Python plugin for Netdata. It is by no means comprehensive.
+
+At minimum, to be buildable and testable, the PR needs to include:
+
+* The module itself, following proper naming conventions: `python.d/<module_dir>/<module_name>.chart.py`
+* A README.md file for the plugin under `python.d/<module_dir>`.
+* The configuration file for the module: `conf.d/python.d/<module_name>.conf`. Python config files are in YAML format, and should include comments describing what options are present. The instructions should also appear in the configuration section of the README.md.
+* A basic configuration for the plugin in the appropriate global config file: `conf.d/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default.
+* A line for the plugin in `python.d/Makefile.am` under `dist_python_DATA`.
+* A line for the plugin configuration file in `conf.d/Makefile.am`, under `dist_pythonconfig_DATA`.
+* Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/adaptec_raid/Makefile.inc b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
new file mode 100644
index 0000000..716cdb2
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += adaptec_raid/adaptec_raid.chart.py
+dist_pythonconfig_DATA += adaptec_raid/adaptec_raid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += adaptec_raid/README.md adaptec_raid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
new file mode 100644
index 0000000..682280f
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -0,0 +1,48 @@
+# adaptec raid
+
+This module collects logical and physical device health metrics.
+
+**Requirements:**
+* `arcconf` program
+* `sudo` program
+* the `netdata` user needs to be able to run `arcconf` via `sudo` without a password
+
+To grab stats it executes:
+ * `sudo -n arcconf GETCONFIG 1 LD`
+ * `sudo -n arcconf GETCONFIG 1 PD`
+
+
+It produces:
+
+1. **Logical Device Status**
+
+2. **Physical Device State**
+
+3. **Physical Device S.M.A.R.T warnings**
+
+4. **Physical Device Temperature**
+
+### prerequisite
+This module uses `arcconf`, which can only be executed by root. It uses
+`sudo` and assumes that `sudo` is configured such that the `netdata` user can
+execute `arcconf` as root without a password.
+
+Add to `sudoers`:
+
+ netdata ALL=(root) NOPASSWD: /path/to/arcconf
+
+### configuration
+
+**adaptec_raid** is disabled by default. It should be explicitly enabled in `python.d.conf`.
+
+```yaml
+adaptec_raid: yes
+```
+
+#### Screenshot:
+
+![image](https://user-images.githubusercontent.com/22274335/47278133-6d306680-d601-11e8-87c2-cc9c0f42d686.png)
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fadaptec_raid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
new file mode 100644
index 0000000..1fb1e43
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Description: adaptec_raid netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+
+disabled_by_default = True
+
+update_every = 5
+
+ORDER = [
+ 'ld_status',
+ 'pd_state',
+ 'pd_smart_warnings',
+ 'pd_temperature',
+]
+
+CHARTS = {
+    'ld_status': {
+        'options': [None, 'Status Is Not OK', 'bool', 'logical devices', 'adaptec_raid.ld_status', 'line'],
+        'lines': []
+    },
+    'pd_state': {
+        'options': [None, 'State Is Not OK', 'bool', 'physical devices', 'adaptec_raid.pd_state', 'line'],
+        'lines': []
+    },
+    'pd_smart_warnings': {
+        'options': [None, 'S.M.A.R.T warnings', 'count', 'physical devices',
+                    'adaptec_raid.smart_warnings', 'line'],
+        'lines': []
+    },
+    'pd_temperature': {
+        'options': [None, 'Temperature', 'celsius', 'physical devices', 'adaptec_raid.temperature', 'line'],
+        'lines': []
+    },
+}
+
+SUDO = 'sudo'
+ARCCONF = 'arcconf'
+
+BAD_LD_STATUS = (
+ 'Degraded',
+ 'Failed',
+)
+
+GOOD_PD_STATUS = (
+ 'Online',
+)
+
+RE_LD = re.compile(
+ r'Logical device number\s+([0-9]+).*?'
+ r'Status of logical device\s+: ([a-zA-Z]+)'
+)
+
+
+def find_lds(d):
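+    # Join all rows into a single string so RE_LD can match each id/status pair.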
+ d = ' '.join(v.strip() for v in d)
+ return [LD(*v) for v in RE_LD.findall(d)]
+
+
+def find_pds(d):
+ pds = list()
+ pd = PD()
+
+ for row in d:
+ row = row.strip()
+ if row.startswith('Device #'):
+ pd = PD()
+ pd.id = row.split('#')[-1]
+ elif not pd.id:
+ continue
+
+ if row.startswith('State'):
+ v = row.split()[-1]
+ pd.state = v
+ elif row.startswith('S.M.A.R.T. warnings'):
+ v = row.split()[-1]
+ pd.smart_warnings = v
+ elif row.startswith('Temperature'):
+ v = row.split(':')[-1].split()[0]
+ pd.temperature = v
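+        # 'NCQ status' appears after the attributes we need, so treat it as an end-of-record marker.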
+ elif row.startswith('NCQ status'):
+ if pd.id and pd.state and pd.smart_warnings:
+ pds.append(pd)
+ pd = PD()
+
+ return pds
+
+
+class LD:
+ def __init__(self, ld_id, status):
+ self.id = ld_id
+ self.status = status
+
+ def data(self):
+ return {
+ 'ld_{0}_status'.format(self.id): int(self.status in BAD_LD_STATUS)
+ }
+
+
+class PD:
+ def __init__(self):
+ self.id = None
+ self.state = None
+ self.smart_warnings = None
+ self.temperature = None
+
+ def data(self):
+ data = {
+ 'pd_{0}_state'.format(self.id): int(self.state not in GOOD_PD_STATUS),
+ 'pd_{0}_smart_warnings'.format(self.id): self.smart_warnings,
+ }
+ if self.temperature and self.temperature.isdigit():
+ data['pd_{0}_temperature'.format(self.id)] = self.temperature
+
+ return data
+
+
+class Arcconf:
+ def __init__(self, arcconf):
+ self.arcconf = arcconf
+
+ def ld_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'LD']
+
+ def pd_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'PD']
+
+
+# TODO: hardcoded sudo...
+class SudoArcconf:
+ def __init__(self, arcconf, sudo):
+ self.arcconf = Arcconf(arcconf)
+ self.sudo = sudo
+
+ def ld_info(self):
+ return [self.sudo, '-n'] + self.arcconf.ld_info()
+
+ def pd_info(self):
+ return [self.sudo, '-n'] + self.arcconf.pd_info()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.use_sudo = self.configuration.get('use_sudo', True)
+ self.arcconf = None
+
+ def execute(self, command, stderr=False):
+ return self._get_raw_data(command=command, stderr=stderr)
+
+ def check(self):
+ arcconf = find_binary(ARCCONF)
+ if not arcconf:
+ self.error('can\'t locate "{0}" binary'.format(ARCCONF))
+ return False
+
+ sudo = find_binary(SUDO)
+ if self.use_sudo:
+ if not sudo:
+ self.error('can\'t locate "{0}" binary'.format(SUDO))
+ return False
+ err = self.execute([sudo, '-n', '-v'], True)
+ if err:
+ self.error(' '.join(err))
+ return False
+
+ if self.use_sudo:
+ self.arcconf = SudoArcconf(arcconf, sudo)
+ else:
+ self.arcconf = Arcconf(arcconf)
+
+ lds = self.get_lds()
+ if not lds:
+ return False
+
+ self.debug('discovered logical devices ids: {0}'.format([ld.id for ld in lds]))
+
+ pds = self.get_pds()
+ if not pds:
+ return False
+
+ self.debug('discovered physical devices ids: {0}'.format([pd.id for pd in pds]))
+
+ self.update_charts(lds, pds)
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ for ld in self.get_lds():
+ data.update(ld.data())
+
+ for pd in self.get_pds():
+ data.update(pd.data())
+
+ return data
+
+ def get_lds(self):
+ raw_lds = self.execute(self.arcconf.ld_info())
+ if not raw_lds:
+ return None
+
+ lds = find_lds(raw_lds)
+ if not lds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.ld_info())))
+ self.debug('output: {0}'.format(raw_lds))
+ return None
+ return lds
+
+ def get_pds(self):
+ raw_pds = self.execute(self.arcconf.pd_info())
+ if not raw_pds:
+ return None
+
+ pds = find_pds(raw_pds)
+ if not pds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.pd_info())))
+ self.debug('output: {0}'.format(raw_pds))
+ return None
+ return pds
+
+ def update_charts(self, lds, pds):
+ charts = self.definitions
+ for ld in lds:
+ dim = ['ld_{0}_status'.format(ld.id), 'ld {0}'.format(ld.id)]
+ charts['ld_status']['lines'].append(dim)
+
+ for pd in pds:
+ dim = ['pd_{0}_state'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_state']['lines'].append(dim)
+
+ dim = ['pd_{0}_smart_warnings'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_smart_warnings']['lines'].append(dim)
+
+ dim = ['pd_{0}_temperature'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_temperature']['lines'].append(dim)
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
new file mode 100644
index 0000000..fa462ec
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
@@ -0,0 +1,53 @@
+# netdata python.d.plugin configuration for adaptec raid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/apache/Makefile.inc b/collectors/python.d.plugin/apache/Makefile.inc
new file mode 100644
index 0000000..70a4215
--- /dev/null
+++ b/collectors/python.d.plugin/apache/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += apache/apache.chart.py
+dist_pythonconfig_DATA += apache/apache.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += apache/README.md apache/Makefile.inc
+
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
new file mode 100644
index 0000000..090feb0
--- /dev/null
+++ b/collectors/python.d.plugin/apache/README.md
@@ -0,0 +1,59 @@
+# apache
+
+This module will monitor one or more Apache servers depending on configuration.
+
+**Requirements:**
+ * Apache with `mod_status` enabled
+
+It produces the following charts:
+
+1. **Requests** in requests/s
+ * requests
+
+2. **Connections**
+ * connections
+
+3. **Async Connections**
+ * keepalive
+ * closing
+ * writing
+
+4. **Bandwidth** in kilobits/s
+ * sent
+
+5. **Workers**
+ * idle
+ * busy
+
+6. **Lifetime Avg. Requests/s** in requests/s
+ * requests_sec
+
+7. **Lifetime Avg. Bandwidth/s** in kilobits/s
+ * size_sec
+
+8. **Lifetime Avg. Request Size** in KiB
+ * size_req
+
+### configuration
+
+It needs only the `url` to the server's `server-status?auto` page.
+
+Here is an example for 2 servers:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/server-status?auto'
+
+remote:
+ url : 'http://www.apache.org/server-status?auto'
+ update_every : 5
+```
+
+Without configuration, the module attempts to connect to `http://localhost/server-status?auto`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fapache%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/apache/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
new file mode 100644
index 0000000..655616d
--- /dev/null
+++ b/collectors/python.d.plugin/apache/apache.chart.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Description: apache netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'requests',
+ 'connections',
+ 'conns_async',
+ 'net',
+ 'workers',
+ 'reqpersec',
+ 'bytespersec',
+ 'bytesperreq',
+]
+
+CHARTS = {
+ 'bytesperreq': {
+ 'options': [None, 'Lifetime Avg. Request Size', 'KiB',
+ 'statistics', 'apache.bytesperreq', 'area'],
+ 'lines': [
+ ['size_req', 'size', 'absolute', 1, 1024 * 100000]
+ ]},
+ 'workers': {
+ 'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
+ 'lines': [
+ ['idle'],
+ ['busy'],
+ ]},
+ 'reqpersec': {
+ 'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+ 'apache.reqpersec', 'area'],
+ 'lines': [
+ ['requests_sec', 'requests', 'absolute', 1, 100000]
+ ]},
+    'bytespersec': {
+        'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
+                    'apache.bytespersec', 'area'],
+        'lines': [
+            ['size_sec', None, 'absolute', 8, 1000 * 100000]
+        ]},
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'incremental']
+ ]},
+ 'net': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
+ 'lines': [
+ ['sent', None, 'incremental', 8, 1]
+ ]},
+ 'connections': {
+ 'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'],
+ 'lines': [
+ ['connections']
+ ]},
+ 'conns_async': {
+ 'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
+ 'lines': [
+ ['keepalive'],
+ ['closing'],
+ ['writing']
+ ]}
+}
+
+ASSIGNMENT = {
+ 'BytesPerReq': 'size_req',
+ 'IdleWorkers': 'idle',
+ 'IdleServers': 'idle_servers',
+ 'BusyWorkers': 'busy',
+ 'BusyServers': 'busy_servers',
+ 'ReqPerSec': 'requests_sec',
+ 'BytesPerSec': 'size_sec',
+ 'Total Accesses': 'requests',
+ 'Total kBytes': 'sent',
+ 'ConnsTotal': 'connections',
+ 'ConnsAsyncKeepAlive': 'keepalive',
+ 'ConnsAsyncClosing': 'closing',
+ 'ConnsAsyncWriting': 'writing'
+}
+
+FLOAT_VALUES = [
+ 'BytesPerReq',
+ 'ReqPerSec',
+ 'BytesPerSec',
+]
+
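+# lighttpd's status page reports BusyServers/IdleServers instead of apache's
+# BusyWorkers/IdleWorkers, so the presence of the 'idle_servers' key in the
+# parsed data means the endpoint belongs to lighttpd, not apache.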
+LIGHTTPD_MARKER = 'idle_servers'
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://localhost/server-status?auto')
+
+ def check(self):
+ self._manager = self._build_manager()
+
+ data = self._get_data()
+
+ if not data:
+ return None
+
+ if LIGHTTPD_MARKER in data:
+ self.turn_into_lighttpd()
+
+ return True
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+
+ if not raw_data:
+ return None
+
+ data = dict()
+
+ for line in raw_data.split('\n'):
+ try:
+ parse_line(line, data)
+ except ValueError:
+ continue
+
+ return data or None
+
+ def turn_into_lighttpd(self):
+ self.module_name = 'lighttpd'
+ for chart in self.definitions:
+ if chart == 'workers':
+ lines = self.definitions[chart]['lines']
+ lines[0] = ['idle_servers', 'idle']
+ lines[1] = ['busy_servers', 'busy']
+ opts = self.definitions[chart]['options']
+ opts[1] = opts[1].replace('apache', 'lighttpd')
+ opts[4] = opts[4].replace('apache', 'lighttpd')
+
+
+def parse_line(line, data):
+ parts = line.split(':')
+
+ if len(parts) != 2:
+ return
+
+ key, value = parts[0], parts[1]
+
+ if key not in ASSIGNMENT:
+ return
+
+ if key in FLOAT_VALUES:
+ data[ASSIGNMENT[key]] = int(float(value) * 100000)
+ else:
+ data[ASSIGNMENT[key]] = int(value)
diff --git a/collectors/python.d.plugin/apache/apache.conf b/collectors/python.d.plugin/apache/apache.conf
new file mode 100644
index 0000000..84e12a5
--- /dev/null
+++ b/collectors/python.d.plugin/apache/apache.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for apache
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, apache also supports the following:
+#
+# url: 'URL' # the URL to fetch apache's mod_status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/server-status?auto'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/server-status?auto'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/server-status?auto'
diff --git a/collectors/python.d.plugin/beanstalk/Makefile.inc b/collectors/python.d.plugin/beanstalk/Makefile.inc
new file mode 100644
index 0000000..4bbb708
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += beanstalk/beanstalk.chart.py
+dist_pythonconfig_DATA += beanstalk/beanstalk.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += beanstalk/README.md beanstalk/Makefile.inc
+
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
new file mode 100644
index 0000000..8daa366
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -0,0 +1,105 @@
+# beanstalk
+
+This module provides server and tube-level statistics:
+
+**Requirements:**
+ * `python-beanstalkc` (see the install hint below)
+
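+The dependency can usually be installed with pip (package name assumed from
+the import above; prefer your distribution's package if one exists):
+
+```shell
+pip install beanstalkc
+```
+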
+**Server statistics:**
+
+1. **Cpu usage** in cpu time
+ * user
+ * system
+
+2. **Jobs rate** in jobs/s
+ * total
+ * timeouts
+
+3. **Connections rate** in connections/s
+ * connections
+
+4. **Commands rate** in commands/s
+ * put
+ * peek
+ * peek-ready
+ * peek-delayed
+ * peek-buried
+ * reserve
+ * use
+ * watch
+ * ignore
+ * delete
+ * release
+ * bury
+ * kick
+ * stats
+ * stats-job
+ * stats-tube
+ * list-tubes
+ * list-tube-used
+ * list-tubes-watched
+ * pause-tube
+
+5. **Current tubes** in tubes
+ * tubes
+
+6. **Current jobs** in jobs
+ * urgent
+ * ready
+ * reserved
+ * delayed
+ * buried
+
+7. **Current connections** in connections
+ * connections
+ * producers
+ * workers
+ * waiting
+
+8. **Binlog** in records/s
+ * written
+ * migrated
+
+9. **Uptime** in seconds
+ * uptime
+
+**Per tube statistics:**
+
+1. **Jobs rate** in jobs/s
+ * jobs
+
+2. **Jobs** in jobs
+ * using
+ * ready
+ * reserved
+ * delayed
+ * buried
+
+3. **Connections** in connections
+ * using
+ * waiting
+ * watching
+
+4. **Commands** in commands/s
+ * deletes
+ * pauses
+
+5. **Pause** in seconds
+ * since
+ * left
+
+
+### configuration
+
+Sample:
+
+```yaml
+host : '127.0.0.1'
+port : 11300
+```
+
+If no configuration is given, the module will attempt to connect to beanstalkd at `127.0.0.1:11300`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbeanstalk%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
new file mode 100644
index 0000000..ed945a7
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+# Description: beanstalk netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import beanstalkc
+ BEANSTALKC = True
+except ImportError:
+ BEANSTALKC = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.loaders import safe_load
+
+
+ORDER = [
+ 'cpu_usage',
+ 'jobs_rate',
+ 'connections_rate',
+ 'commands_rate',
+ 'current_tubes',
+ 'current_jobs',
+ 'current_connections',
+ 'binlog',
+ 'uptime',
+]
+
+CHARTS = {
+ 'cpu_usage': {
+ 'options': [None, 'Cpu Usage', 'cpu time', 'server statistics', 'beanstalk.cpu_usage', 'area'],
+ 'lines': [
+ ['rusage-utime', 'user', 'incremental'],
+ ['rusage-stime', 'system', 'incremental']
+ ]
+ },
+ 'jobs_rate': {
+ 'options': [None, 'Jobs Rate', 'jobs/s', 'server statistics', 'beanstalk.jobs_rate', 'line'],
+ 'lines': [
+ ['total-jobs', 'total', 'incremental'],
+ ['job-timeouts', 'timeouts', 'incremental']
+ ]
+ },
+ 'connections_rate': {
+ 'options': [None, 'Connections Rate', 'connections/s', 'server statistics', 'beanstalk.connections_rate',
+ 'area'],
+ 'lines': [
+ ['total-connections', 'connections', 'incremental']
+ ]
+ },
+ 'commands_rate': {
+ 'options': [None, 'Commands Rate', 'commands/s', 'server statistics', 'beanstalk.commands_rate', 'stacked'],
+ 'lines': [
+ ['cmd-put', 'put', 'incremental'],
+ ['cmd-peek', 'peek', 'incremental'],
+ ['cmd-peek-ready', 'peek-ready', 'incremental'],
+ ['cmd-peek-delayed', 'peek-delayed', 'incremental'],
+ ['cmd-peek-buried', 'peek-buried', 'incremental'],
+ ['cmd-reserve', 'reserve', 'incremental'],
+ ['cmd-use', 'use', 'incremental'],
+ ['cmd-watch', 'watch', 'incremental'],
+ ['cmd-ignore', 'ignore', 'incremental'],
+ ['cmd-delete', 'delete', 'incremental'],
+ ['cmd-release', 'release', 'incremental'],
+ ['cmd-bury', 'bury', 'incremental'],
+ ['cmd-kick', 'kick', 'incremental'],
+ ['cmd-stats', 'stats', 'incremental'],
+ ['cmd-stats-job', 'stats-job', 'incremental'],
+ ['cmd-stats-tube', 'stats-tube', 'incremental'],
+ ['cmd-list-tubes', 'list-tubes', 'incremental'],
+ ['cmd-list-tube-used', 'list-tube-used', 'incremental'],
+ ['cmd-list-tubes-watched', 'list-tubes-watched', 'incremental'],
+ ['cmd-pause-tube', 'pause-tube', 'incremental']
+ ]
+ },
+ 'current_tubes': {
+ 'options': [None, 'Current Tubes', 'tubes', 'server statistics', 'beanstalk.current_tubes', 'area'],
+ 'lines': [
+ ['current-tubes', 'tubes']
+ ]
+ },
+ 'current_jobs': {
+ 'options': [None, 'Current Jobs', 'jobs', 'server statistics', 'beanstalk.current_jobs', 'stacked'],
+ 'lines': [
+ ['current-jobs-urgent', 'urgent'],
+ ['current-jobs-ready', 'ready'],
+ ['current-jobs-reserved', 'reserved'],
+ ['current-jobs-delayed', 'delayed'],
+ ['current-jobs-buried', 'buried']
+ ]
+ },
+ 'current_connections': {
+ 'options': [None, 'Current Connections', 'connections', 'server statistics',
+ 'beanstalk.current_connections', 'line'],
+ 'lines': [
+ ['current-connections', 'connections'],
+ ['current-producers', 'producers'],
+ ['current-workers', 'workers'],
+ ['current-waiting', 'waiting']
+ ]
+ },
+ 'binlog': {
+ 'options': [None, 'Binlog', 'records/s', 'server statistics', 'beanstalk.binlog', 'line'],
+ 'lines': [
+ ['binlog-records-written', 'written', 'incremental'],
+ ['binlog-records-migrated', 'migrated', 'incremental']
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'],
+ 'lines': [
+ ['uptime'],
+ ]
+ }
+}
+
+
+def tube_chart_template(name):
+ order = [
+ '{0}_jobs_rate'.format(name),
+ '{0}_jobs'.format(name),
+ '{0}_connections'.format(name),
+ '{0}_commands'.format(name),
+ '{0}_pause'.format(name)
+ ]
+ family = 'tube {0}'.format(name)
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'],
+ 'lines': [
+ ['_'.join([name, 'total-jobs']), 'jobs', 'incremental']
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'current-jobs-urgent']), 'urgent'],
+ ['_'.join([name, 'current-jobs-ready']), 'ready'],
+ ['_'.join([name, 'current-jobs-reserved']), 'reserved'],
+ ['_'.join([name, 'current-jobs-delayed']), 'delayed'],
+ ['_'.join([name, 'current-jobs-buried']), 'buried']
+ ]
+ },
+ order[2]: {
+ 'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'current-using']), 'using'],
+ ['_'.join([name, 'current-waiting']), 'waiting'],
+ ['_'.join([name, 'current-watching']), 'watching']
+ ]
+ },
+ order[3]: {
+ 'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'],
+ ['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental']
+ ]
+ },
+ order[4]: {
+ 'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'pause']), 'since'],
+ ['_'.join([name, 'pause-time-left']), 'left']
+ ]
+ }
+ }
+
+ return order, charts
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.configuration = configuration
+ self.order = list(ORDER)
+ self.definitions = dict(CHARTS)
+ self.conn = None
+ self.alive = True
+
+ def check(self):
+ if not BEANSTALKC:
+ self.error("'beanstalkc' module is needed to use beanstalk.chart.py")
+ return False
+
+ self.conn = self.connect()
+
+ return bool(self.conn)
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ if not self.is_alive():
+ return None
+
+ active_charts = self.charts.active_charts()
+ data = dict()
+
+ try:
+ data.update(self.conn.stats())
+
+ for tube in self.conn.tubes():
+ stats = self.conn.stats_tube(tube)
+
+ if tube + '_jobs_rate' not in active_charts:
+ self.create_new_tube_charts(tube)
+
+ for stat in stats:
+ data['_'.join([tube, stat])] = stats[stat]
+
+ except beanstalkc.SocketError:
+ self.alive = False
+ return None
+
+ return data or None
+
+ def create_new_tube_charts(self, tube):
+ order, charts = tube_chart_template(tube)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
+ def connect(self):
+ host = self.configuration.get('host', '127.0.0.1')
+ port = self.configuration.get('port', 11300)
+ timeout = self.configuration.get('timeout', 1)
+ try:
+ return beanstalkc.Connection(host=host,
+ port=port,
+ connect_timeout=timeout,
+ parse_yaml=safe_load)
+ except beanstalkc.SocketError as error:
+ self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error))
+ return None
+
+ def reconnect(self):
+ try:
+ self.conn.reconnect()
+ self.alive = True
+ return True
+ except beanstalkc.SocketError:
+ return False
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf
new file mode 100644
index 0000000..7586ad2
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf
@@ -0,0 +1,78 @@
+# netdata python.d.plugin configuration for beanstalk
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# When a plugin sends the obsolete flag, the charts are not deleted
+# from netdata immediately.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# chart_cleanup: 10
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations
+#
+# Additionally to the above, beanstalk also supports the following:
+#
+# host: 'host' # Server ip address or hostname. Default: 127.0.0.1
+# port: port # Beanstalkd port. Default: 11300
+#
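+# A commented-out example job (illustrative; both values are the defaults):
+#
+# localhost:
+#  name: 'local'
+#  host: '127.0.0.1'
+#  port: 11300
+#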
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/bind_rndc/Makefile.inc b/collectors/python.d.plugin/bind_rndc/Makefile.inc
new file mode 100644
index 0000000..72f3914
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += bind_rndc/bind_rndc.chart.py
+dist_pythonconfig_DATA += bind_rndc/bind_rndc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += bind_rndc/README.md bind_rndc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
new file mode 100644
index 0000000..fefe749
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -0,0 +1,62 @@
+# bind_rndc
+
+This module parses the bind dump file to collect real-time performance metrics.
+
+**Requirements:**
+ * Version of bind must be 9.6+
+ * Netdata must have permissions to run `rndc stats`
+
+It produces:
+
+1. **Name server statistics**
+ * requests
+ * responses
+ * success
+ * auth_answer
+ * nonauth_answer
+ * nxrrset
+ * failure
+ * nxdomain
+ * recursion
+ * duplicate
+ * rejections
+
+2. **Incoming queries**
+ * RESERVED0
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * MX
+ * TXT
+ * X25
+ * AAAA
+ * SRV
+ * NAPTR
+ * A6
+ * DS
+ * RRSIG
+ * DNSKEY
+ * SPF
+ * ANY
+ * DLV
+
+3. **Outgoing queries**
+ * Same as Incoming queries
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ named_stats_path : '/var/log/bind/named.stats'
+```
+
+If no configuration is given, the module will attempt to read the named.stats file at `/var/log/bind/named.stats`; a matching `named.conf` fragment is shown below.
+
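+For the module to have anything to read, BIND must dump statistics to that
+file; a minimal `named.conf` fragment (assuming BIND 9.6+, matching the setup
+notes in `bind_rndc.conf`):
+
+```
+options {
+ statistics-file "/var/log/bind/named.stats";
+};
+```
+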
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fbind_rndc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
new file mode 100644
index 0000000..7ac1bc3
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+# Description: bind rndc netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from collections import defaultdict
+from subprocess import Popen
+
+from bases.collection import find_binary
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+update_every = 30
+
+ORDER = [
+ 'name_server_statistics',
+ 'incoming_queries',
+ 'outgoing_queries',
+ 'named_stats_size',
+]
+
+CHARTS = {
+ 'name_server_statistics': {
+ 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics',
+ 'bind_rndc.name_server_statistics', 'line'],
+ 'lines': [
+ ['nms_requests', 'requests', 'incremental'],
+ ['nms_rejected_queries', 'rejected_queries', 'incremental'],
+ ['nms_success', 'success', 'incremental'],
+ ['nms_failure', 'failure', 'incremental'],
+ ['nms_responses', 'responses', 'incremental'],
+ ['nms_duplicate', 'duplicate', 'incremental'],
+ ['nms_recursion', 'recursion', 'incremental'],
+ ['nms_nxrrset', 'nxrrset', 'incremental'],
+ ['nms_nxdomain', 'nxdomain', 'incremental'],
+ ['nms_non_auth_answer', 'non_auth_answer', 'incremental'],
+ ['nms_auth_answer', 'auth_answer', 'incremental'],
+ ['nms_dropped_queries', 'dropped_queries', 'incremental'],
+ ]},
+ 'incoming_queries': {
+ 'options': [None, 'Incoming Queries', 'queries', 'incoming queries', 'bind_rndc.incoming_queries', 'line'],
+ 'lines': [
+ ]},
+ 'outgoing_queries': {
+ 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries', 'bind_rndc.outgoing_queries', 'line'],
+ 'lines': [
+ ]},
+ 'named_stats_size': {
+ 'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
+ 'lines': [
+ ['stats_size', None, 'absolute', 1, 1 << 20]
+ ]
+ }
+}
+
+NMS = {
+ 'nms_requests': [
+ 'IPv4 requests received',
+ 'IPv6 requests received',
+ 'TCP requests received',
+ 'requests with EDNS(0) received'
+ ],
+ 'nms_responses': [
+ 'responses sent',
+ 'truncated responses sent',
+ 'responses with EDNS(0) sent',
+ 'requests with unsupported EDNS version received'
+ ],
+ 'nms_failure': [
+ 'other query failures',
+ 'queries resulted in SERVFAIL'
+ ],
+ 'nms_auth_answer': ['queries resulted in authoritative answer'],
+ 'nms_non_auth_answer': ['queries resulted in non authoritative answer'],
+ 'nms_nxrrset': ['queries resulted in nxrrset'],
+ 'nms_success': ['queries resulted in successful answer'],
+ 'nms_nxdomain': ['queries resulted in NXDOMAIN'],
+ 'nms_recursion': ['queries caused recursion'],
+ 'nms_duplicate': ['duplicate queries received'],
+ 'nms_rejected_queries': [
+ 'auth queries rejected',
+ 'recursive queries rejected'
+ ],
+ 'nms_dropped_queries': ['queries dropped']
+}
+
+STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
+ self.rndc = find_binary('rndc')
+ self.data = dict(
+ nms_requests=0,
+ nms_responses=0,
+ nms_failure=0,
+ nms_auth_answer=0,
+ nms_non_auth_answer=0,
+ nms_nxrrset=0,
+ nms_success=0,
+ nms_nxdomain=0,
+ nms_recursion=0,
+ nms_duplicate=0,
+ nms_rejected_queries=0,
+ nms_dropped_queries=0,
+ )
+
+ def check(self):
+ if not self.rndc:
+ self.error('Can\'t locate "rndc" binary or binary is not executable by netdata')
+ return False
+
+ if not (os.path.isfile(self.named_stats_path) and os.access(self.named_stats_path, os.R_OK)):
+ self.error('Cannot access file %s' % self.named_stats_path)
+ return False
+
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if not run_rndc.returncode:
+ return True
+ self.error('Not enough permissions to run "%s stats"' % self.rndc)
+ return False
+
+ def _get_raw_data(self):
+ """
+ Run 'rndc stats' and read last dump from named.stats
+ :return: dict
+ """
+ result = dict()
+ try:
+ current_size = os.path.getsize(self.named_stats_path)
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if run_rndc.returncode:
+ return None
+ with open(self.named_stats_path) as named_stats:
+ named_stats.seek(current_size)
+ result['stats'] = named_stats.readlines()
+ result['size'] = current_size
+ return result
+ except (OSError, IOError):
+ return None
+
+ def _get_data(self):
+ """
+ Parse data from _get_raw_data()
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+
+ if raw_data is None:
+ return None
+ parsed = dict()
+ for stat in STATS:
+ parsed[stat] = parse_stats(field=stat,
+ named_stats=raw_data['stats'])
+
+ self.data.update(nms_mapper(data=parsed['Name Server Statistics']))
+
+ for elem in zip(['Incoming Queries', 'Outgoing Queries'], ['incoming_queries', 'outgoing_queries']):
+ parsed_key, chart_name = elem[0], elem[1]
+ for dimension_id, value in queries_mapper(data=parsed[parsed_key],
+ add=chart_name[:9]).items():
+
+ if dimension_id not in self.data:
+ dimension = dimension_id.replace(chart_name[:9], '')
+ if dimension_id not in self.charts[chart_name]:
+ self.charts[chart_name].add_dimension([dimension_id, dimension, 'incremental'])
+
+ self.data[dimension_id] = value
+
+ self.data['stats_size'] = raw_data['size']
+ return self.data
+
+
+def parse_stats(field, named_stats):
+ """
+ :param field: str:
+ :param named_stats: list:
+ :return: dict
+
+ Example:
+ field: 'Incoming Queries'
+ named_stats (list of lines):
+ ++ Incoming Requests ++
+ 1405660 QUERY
+ 3 NOTIFY
+ ++ Incoming Queries ++
+ 1214961 A
+ 75 NS
+ 2 CNAME
+ 2897 SOA
+ 35544 PTR
+ 14 MX
+ 5822 TXT
+ 145974 AAAA
+ 371 SRV
+ ++ Outgoing Queries ++
+ ...
+
+ result:
+ {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...}
+ """
+ data = dict()
+ ns = iter(named_stats)
+ for line in ns:
+ if field not in line:
+ continue
+ while True:
+ try:
+ line = next(ns)
+ except StopIteration:
+ break
+ if '++' not in line:
+ if '[' in line:
+ continue
+ v, k = line.strip().split(' ', 1)
+ if k not in data:
+ data[k] = 0
+ data[k] += int(v)
+ continue
+ break
+ break
+ return data
+
+
+def nms_mapper(data):
+ """
+ :param data: dict
+ :return: dict(defaultdict)
+ """
+ result = defaultdict(int)
+ for k, v in NMS.items():
+ for elem in v:
+ result[k] += data.get(elem, 0)
+ return result
+
+
+def queries_mapper(data, add):
+ """
+ :param data: dict
+ :param add: str
+ :return: dict
+ """
+ return {add + k: v for k, v in data.items()}
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
new file mode 100644
index 0000000..3b7e9a2
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
@@ -0,0 +1,110 @@
+# netdata python.d.plugin configuration for bind_rndc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, bind_rndc also supports the following:
+#
+# named_stats_path: 'path to named.stats' # Default: '/var/log/bind/named.stats'
+#------------------------------------------------------------------------------------------------------------------
+# IMPORTANT Information
+#
+# BIND APPENDS to its log at EVERY RUN. It is NOT RECOMMENDED to set update_every below 30 sec.
+# It is STRONGLY RECOMMENDED to create a bind-rndc conf file for logrotate.
+#
+# To set up your BIND to dump stats do the following:
+#
+# 1. add to 'named.conf.options' options {}:
+# statistics-file "/var/log/bind/named.stats";
+#
+# 2. Create bind/ directory in /var/log
+# cd /var/log/ && mkdir bind
+#
+# 3. Change owner of directory to 'bind' user
+# chown bind bind/
+#
+# 4. RELOAD (NOT restart) BIND
+# systemctl reload bind9.service
+#
+# 5. Run 'rndc stats' as root to dump (BIND will create named.stats in the new directory)
+#
+#
+# To ALLOW NETDATA TO RUN 'rndc stats' change '/etc/bind/rndc.key' group to netdata
+# chown :netdata rndc.key
+#
+# Last but NOT least, create bind-rndc.conf in logrotate.d/
+# A working example:
+# /var/log/bind/named.stats {
+#
+# daily
+# rotate 4
+# compress
+# delaycompress
+# create 0644 bind bind
+# missingok
+# postrotate
+# rndc reload > /dev/null
+# endscript
+# }
+#
+# To test your logrotate conf file run as root:
+#
+# logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/boinc/Makefile.inc b/collectors/python.d.plugin/boinc/Makefile.inc
new file mode 100644
index 0000000..319e19c
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += boinc/boinc.chart.py
+dist_pythonconfig_DATA += boinc/boinc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += boinc/README.md boinc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
new file mode 100644
index 0000000..0f0aa1c
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -0,0 +1,30 @@
+# boinc
+
+This module monitors task counts for the Berkeley Open Infrastructure
+for Network Computing (BOINC) distributed computing client using the same
+RPC interface that the BOINC monitoring GUI does.
+
+It provides charts tracking the total number of tasks and active tasks,
+as well as charts tracking each of the possible task states.
+
+### configuration
+
+BOINC requires use of a password to access its RPC interface. You can
+find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
+
+By default, the module will try to auto-detect the password by looking
+in `/var/lib/boinc` for this file (this is the location most Linux
+distributions use for a system-wide BOINC installation), so things may
+just work without needing configuration for the local system.
+
+You can monitor remote systems as well:
+
+```yaml
+remote:
+ host: some-host
+ password: some-password
+```
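+
+If auto-detection of the password fails, a local job can set it explicitly
+(a sketch; replace `some-password` with the contents of `gui_rpc_auth.cfg`):
+
+```yaml
+local:
+ host: localhost
+ password: some-password
+```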
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fboinc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
new file mode 100644
index 0000000..e10b28c
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+# Description: BOINC netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+from third_party import boinc_client
+
+
+ORDER = [
+ 'tasks',
+ 'states',
+ 'sched_states',
+ 'process_states',
+]
+
+CHARTS = {
+ 'tasks': {
+ 'options': [None, 'Overall Tasks', 'tasks', 'boinc', 'boinc.tasks', 'line'],
+ 'lines': [
+ ['total', 'Total', 'absolute', 1, 1],
+ ['active', 'Active', 'absolute', 1, 1]
+ ]
+ },
+ 'states': {
+ 'options': [None, 'Tasks per State', 'tasks', 'boinc', 'boinc.states', 'line'],
+ 'lines': [
+ ['new', 'New', 'absolute', 1, 1],
+ ['downloading', 'Downloading', 'absolute', 1, 1],
+ ['downloaded', 'Ready to Run', 'absolute', 1, 1],
+ ['comperror', 'Compute Errors', 'absolute', 1, 1],
+ ['uploading', 'Uploading', 'absolute', 1, 1],
+ ['uploaded', 'Uploaded', 'absolute', 1, 1],
+ ['aborted', 'Aborted', 'absolute', 1, 1],
+ ['upload_failed', 'Failed Uploads', 'absolute', 1, 1]
+ ]
+ },
+ 'sched_states': {
+ 'options': [None, 'Tasks per Scheduler State', 'tasks', 'boinc', 'boinc.sched', 'line'],
+ 'lines': [
+ ['uninit_sched', 'Uninitialized', 'absolute', 1, 1],
+ ['preempted', 'Preempted', 'absolute', 1, 1],
+ ['scheduled', 'Scheduled', 'absolute', 1, 1]
+ ]
+ },
+ 'process_states': {
+ 'options': [None, 'Tasks per Process State', 'tasks', 'boinc', 'boinc.process', 'line'],
+ 'lines': [
+ ['uninit_proc', 'Uninitialized', 'absolute', 1, 1],
+ ['executing', 'Executing', 'absolute', 1, 1],
+ ['suspended', 'Suspended', 'absolute', 1, 1],
+ ['aborting', 'Aborted', 'absolute', 1, 1],
+ ['quit', 'Quit', 'absolute', 1, 1],
+ ['copy_pending', 'Copy Pending', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# A simple template used for pre-loading the return dictionary to make
+# the _get_data() method simpler.
+_DATA_TEMPLATE = {
+ 'total': 0,
+ 'active': 0,
+ 'new': 0,
+ 'downloading': 0,
+ 'downloaded': 0,
+ 'comperror': 0,
+ 'uploading': 0,
+ 'uploaded': 0,
+ 'aborted': 0,
+ 'upload_failed': 0,
+ 'uninit_sched': 0,
+ 'preempted': 0,
+ 'scheduled': 0,
+ 'uninit_proc': 0,
+ 'executing': 0,
+ 'suspended': 0,
+ 'aborting': 0,
+ 'quit': 0,
+ 'copy_pending': 0
+}
+
+# Map task states to dimensions
+_TASK_MAP = {
+ boinc_client.ResultState.NEW: 'new',
+ boinc_client.ResultState.FILES_DOWNLOADING: 'downloading',
+ boinc_client.ResultState.FILES_DOWNLOADED: 'downloaded',
+ boinc_client.ResultState.COMPUTE_ERROR: 'comperror',
+ boinc_client.ResultState.FILES_UPLOADING: 'uploading',
+ boinc_client.ResultState.FILES_UPLOADED: 'uploaded',
+ boinc_client.ResultState.ABORTED: 'aborted',
+ boinc_client.ResultState.UPLOAD_FAILED: 'upload_failed'
+}
+
+# Map scheduler states to dimensions
+_SCHED_MAP = {
+ boinc_client.CpuSched.UNINITIALIZED: 'uninit_sched',
+ boinc_client.CpuSched.PREEMPTED: 'preempted',
+ boinc_client.CpuSched.SCHEDULED: 'scheduled',
+}
+
+# Maps process states to dimensions
+_PROC_MAP = {
+ boinc_client.Process.UNINITIALIZED: 'uninit_proc',
+ boinc_client.Process.EXECUTING: 'executing',
+ boinc_client.Process.SUSPENDED: 'suspended',
+ boinc_client.Process.ABORT_PENDING: 'aborted',
+ boinc_client.Process.QUIT_PENDING: 'quit',
+ boinc_client.Process.COPY_PENDING: 'copy_pending'
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 0)
+ self.password = self.configuration.get('password', '')
+ self.client = boinc_client.BoincClient(host=self.host, port=self.port, passwd=self.password)
+ self.alive = False
+
+ def check(self):
+ return self.connect()
+
+ def connect(self):
+ self.client.connect()
+ self.alive = self.client.connected and self.client.authorized
+ return self.alive
+
+ def reconnect(self):
+ # The client class itself actually disconnects existing
+ # connections when it is told to connect, so we don't need to
+ # explicitly disconnect when we're just trying to reconnect.
+ return self.connect()
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+
+ data = dict(_DATA_TEMPLATE)
+
+ try:
+ results = self.client.get_tasks()
+ except socket.error:
+ self.error('Connection is dead')
+ self.alive = False
+ return None
+
+ for task in results:
+ data['total'] += 1
+ data[_TASK_MAP[task.state]] += 1
+ try:
+ if task.active_task:
+ data['active'] += 1
+ data[_SCHED_MAP[task.scheduler_state]] += 1
+ data[_PROC_MAP[task.active_task_state]] += 1
+ except AttributeError:
+ pass
+
+ return data or None
diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
new file mode 100644
index 0000000..16edf55
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for boinc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, boinc also supports the following:
+#
+# host: localhost # The host running the BOINC client
+# port: 31416 # The remote GUI RPC port for BOINC
+# password: '' # The remote GUI RPC password
diff --git a/collectors/python.d.plugin/ceph/Makefile.inc b/collectors/python.d.plugin/ceph/Makefile.inc
new file mode 100644
index 0000000..15b039e
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ceph/ceph.chart.py
+dist_pythonconfig_DATA += ceph/ceph.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ceph/README.md ceph/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
new file mode 100644
index 0000000..1f067c6
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -0,0 +1,34 @@
+# ceph
+
+This module monitors the usage and consumption statistics of a ceph cluster.
+
+It produces:
+
+* Cluster statistics (usage, available, latency, objects, read/write rate)
+* OSD usage
+* OSD latency
+* Pool usage
+* Pool read/write operations
+* Pool read/write rate
+* Number of objects per pool
+
+**Requirements:**
+
+- `rados` python module
+- Granting read permissions to ceph group from keyring file
+```shell
+# chmod 640 /etc/ceph/ceph.client.admin.keyring
+```
+
+### Configuration
+
+Sample:
+```yaml
+local:
+ config_file: '/etc/ceph/ceph.conf'
+ keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fceph%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
new file mode 100644
index 0000000..45b5262
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -0,0 +1,344 @@
+# -*- coding: utf-8 -*-
+# Description: ceph netdata python.d module
+# Author: Luis Eduardo (lets00)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import rados
+ CEPH = True
+except ImportError:
+ CEPH = False
+
+import json
+import os
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 10
+
+ORDER = [
+ 'general_usage',
+ 'general_objects',
+ 'general_bytes',
+ 'general_operations',
+ 'general_latency',
+ 'pool_usage',
+ 'pool_objects',
+ 'pool_read_bytes',
+ 'pool_write_bytes',
+ 'pool_read_operations',
+ 'pool_write_operations',
+ 'osd_usage',
+ 'osd_apply_latency',
+ 'osd_commit_latency'
+]
+
+CHARTS = {
+ 'general_usage': {
+ 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
+ 'lines': [
+ ['general_available', 'avail', 'absolute'],
+ ['general_usage', 'used', 'absolute']
+ ]
+ },
+ 'general_objects': {
+ 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'],
+ 'lines': [
+ ['general_objects', 'cluster', 'absolute']
+ ]
+ },
+ 'general_bytes': {
+ 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
+ 'area'],
+ 'lines': [
+ ['general_read_bytes', 'read', 'absolute', 1, 1024],
+ ['general_write_bytes', 'write', 'absolute', -1, 1024]
+ ]
+ },
+ 'general_operations': {
+ 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations',
+ 'area'],
+ 'lines': [
+ ['general_read_operations', 'read', 'absolute', 1],
+ ['general_write_operations', 'write', 'absolute', -1]
+ ]
+ },
+ 'general_latency': {
+ 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency',
+ 'area'],
+ 'lines': [
+ ['general_apply_latency', 'apply', 'absolute'],
+ ['general_commit_latency', 'commit', 'absolute']
+ ]
+ },
+ 'pool_usage': {
+ 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
+ 'lines': []
+ },
+ 'pool_objects': {
+ 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'],
+ 'lines': []
+ },
+ 'pool_read_bytes': {
+ 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
+ 'lines': []
+ },
+ 'pool_write_bytes': {
+ 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
+ 'lines': []
+ },
+ 'pool_read_operations': {
+ 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'],
+ 'lines': []
+ },
+ 'pool_write_operations': {
+ 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 'area'],
+ 'lines': []
+ },
+ 'osd_usage': {
+ 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
+ 'lines': []
+ },
+ 'osd_apply_latency': {
+ 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'],
+ 'lines': []
+ },
+ 'osd_commit_latency': {
+ 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'],
+ 'lines': []
+ }
+
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.config_file = self.configuration.get('config_file')
+ self.keyring_file = self.configuration.get('keyring_file')
+ self.cluster = None
+
+ def check(self):
+ """
+ Checks module
+ :return:
+ """
+ if not CEPH:
+ self.error('rados module is needed to use ceph.chart.py')
+ return False
+ if not (self.config_file and self.keyring_file):
+ self.error('config_file and/or keyring_file is not defined')
+ return False
+
+ # Verify files and permissions
+ if not (os.access(self.config_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.keyring_file))
+ return False
+ if not (os.access(self.config_file, os.R_OK)):
+ self.error('Ceph plugin cannot read {0}; grant it read permission.'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.R_OK)):
+ self.error('Ceph plugin cannot read {0}; grant it read permission.'.format(self.keyring_file))
+ return False
+ try:
+ self.cluster = rados.Rados(conffile=self.config_file,
+ conf=dict(keyring=self.keyring_file))
+ self.cluster.connect()
+ except rados.Error as error:
+ self.error(error)
+ return False
+ self.create_definitions()
+ return True
+
+ def create_definitions(self):
+ """
+ Create dynamically charts options
+ :return: None
+ """
+ # Pool lines
+ for pool in sorted(self._get_df()['pools']):
+ self.definitions['pool_usage']['lines'].append([pool['name'],
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']),
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
+ self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
+ self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
+
+ # OSD lines
+ for osd in sorted(self._get_osd_df()['nodes']):
+ self.definitions['osd_usage']['lines'].append([osd['name'],
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+
+ def get_data(self):
+ """
+ Catch all ceph data
+ :return: dict
+ """
+ try:
+ data = {}
+ df = self._get_df()
+ osd_df = self._get_osd_df()
+ osd_perf = self._get_osd_perf()
+ pool_stats = self._get_osd_pool_stats()
+ data.update(self._get_general(osd_perf, pool_stats))
+ for pool in df['pools']:
+ data.update(self._get_pool_usage(pool))
+ data.update(self._get_pool_objects(pool))
+ for pool_io in pool_stats:
+ data.update(self._get_pool_rw(pool_io))
+ for osd in osd_df['nodes']:
+ data.update(self._get_osd_usage(osd))
+ for osd_apply_commit in osd_perf['osd_perf_infos']:
+ data.update(self._get_osd_latency(osd_apply_commit))
+ return data
+ except (ValueError, AttributeError) as error:
+ self.error(error)
+ return None
+
+ def _get_general(self, osd_perf, pool_stats):
+ """
+ Get ceph's general usage
+ :return: dict
+ """
+ status = self.cluster.get_cluster_stats()
+ read_bytes_sec = 0
+ write_bytes_sec = 0
+ read_op_per_sec = 0
+ write_op_per_sec = 0
+ apply_latency = 0
+ commit_latency = 0
+
+ for pool_rw_io_b in pool_stats:
+ read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0)
+ write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0)
+ read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0)
+ write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0)
+ for perf in osd_perf['osd_perf_infos']:
+ apply_latency += perf['perf_stats']['apply_latency_ms']
+ commit_latency += perf['perf_stats']['commit_latency_ms']
+
+ return {
+ 'general_usage': int(status['kb_used']),
+ 'general_available': int(status['kb_avail']),
+ 'general_objects': int(status['num_objects']),
+ 'general_read_bytes': read_bytes_sec,
+ 'general_write_bytes': write_bytes_sec,
+ 'general_read_operations': read_op_per_sec,
+ 'general_write_operations': write_op_per_sec,
+ 'general_apply_latency': apply_latency,
+ 'general_commit_latency': commit_latency
+ }
+
+ @staticmethod
+ def _get_pool_usage(pool):
+ """
+ Process raw data into pool usage dict information
+ :return: A pool dict with pool name's key and usage bytes' value
+ """
+ return {pool['name']: pool['stats']['kb_used']}
+
+ @staticmethod
+ def _get_pool_objects(pool):
+ """
+ Process raw data into pool usage dict information
+ :return: A pool dict with pool name's key and object numbers
+ """
+ return {'obj_{0}'.format(pool['name']): pool['stats']['objects']}
+
+ @staticmethod
+ def _get_pool_rw(pool):
+ """
+ Get read/write kb and operations in a pool
+ :return: A pool dict with both read/write bytes and operations.
+ """
+ return {
+ 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
+ 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
+ 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
+ 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
+ }
+
+ @staticmethod
+ def _get_osd_usage(osd):
+ """
+ Process raw data into osd dict information to get osd usage
+ :return: A osd dict with osd name's key and usage bytes' value
+ """
+ return {osd['name']: float(osd['kb_used'])}
+
+ @staticmethod
+ def _get_osd_latency(osd):
+ """
+ Get ceph osd apply and commit latency
+ :return: A osd dict with osd name's key with both apply and commit latency values
+ """
+ return {
+ 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
+ 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']
+ }
+
+ def _get_df(self):
+ """
+ Get ceph df output
+ :return: ceph df --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'df',
+ 'format': 'json'
+ }), '')[1])
+
+ def _get_osd_df(self):
+ """
+ Get ceph osd df output
+ :return: ceph osd df --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd df',
+ 'format': 'json'
+ }), '')[1].replace('-nan', '"-nan"'))
+
+ def _get_osd_perf(self):
+ """
+ Get ceph osd performance
+ :return: ceph osd perf --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd perf',
+ 'format': 'json'
+ }), '')[1])
+
+ def _get_osd_pool_stats(self):
+ """
+ Get ceph osd pool status.
+ This command is used to get information about both
+ read/write operation and bytes per second on each pool
+ :return: ceph osd pool stats --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd pool stats',
+ 'format': 'json'
+ }), '')[1])
diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
new file mode 100644
index 0000000..4caabbf
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/ceph.conf
@@ -0,0 +1,73 @@
+# netdata python.d.plugin configuration for ceph stats
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 10 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, ceph plugin also supports the following:
+#
+# config_file: 'config_file' # Ceph config file.
+# keyring_file: 'keyring_file' # Ceph keyring file. The netdata user must be added to the ceph group
+# # and the keyring file must be group-readable.
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+config_file: '/etc/ceph/ceph.conf'
+keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+
diff --git a/collectors/python.d.plugin/chrony/Makefile.inc b/collectors/python.d.plugin/chrony/Makefile.inc
new file mode 100644
index 0000000..18a805b
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += chrony/chrony.chart.py
+dist_pythonconfig_DATA += chrony/chrony.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += chrony/README.md chrony/Makefile.inc
+
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
new file mode 100644
index 0000000..67ed1a0
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -0,0 +1,33 @@
+# chrony
+
+This module monitors the precision and statistics of a local chronyd server.
+
+It produces:
+
+* frequency
+* last offset
+* RMS offset
+* residual freq
+* root delay
+* root dispersion
+* skew
+* system time
+
+**Requirements:**
+Verify that the `netdata` user can execute `chronyc tracking`. If necessary, update the `cmdallow` directive in `/etc/chrony.conf`.
+
+### Configuration
+
+Sample:
+```yaml
+# data collection frequency:
+update_every: 1
+
+# chrony query command:
+local:
+ command: 'chronyc -n tracking'
+```
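+
+For reference, the module parses the `key: value` pairs printed by
+`chronyc tracking`; the output looks similar to the following (values are
+illustrative):
+
+```
+Stratum         : 3
+System time     : 0.000020390 seconds fast of NTP time
+Last offset     : -0.000012095 seconds
+RMS offset      : 0.000025221 seconds
+Frequency       : 16.229 ppm slow
+Residual freq   : +0.000 ppm
+Skew            : 0.278 ppm
+Root delay      : 0.010502 seconds
+Root dispersion : 0.001371 seconds
+```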
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fchrony%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/chrony/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py
new file mode 100644
index 0000000..91f7250
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/chrony.chart.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Description: chrony netdata python.d module
+# Author: Dominik Schloesser (domschl)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+CHRONY_COMMAND = 'chronyc -n tracking'
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = [
+ 'system',
+ 'offsets',
+ 'stratum',
+ 'root',
+ 'frequency',
+ 'residualfreq',
+ 'skew',
+]
+
+CHARTS = {
+ 'system': {
+ 'options': [None, 'Chrony System Time Deltas', 'microseconds', 'system', 'chrony.system', 'area'],
+ 'lines': [
+ ['timediff', 'system time', 'absolute', 1, 1000]
+ ]
+ },
+ 'offsets': {
+ 'options': [None, 'Chrony System Time Offsets', 'microseconds', 'system', 'chrony.offsets', 'area'],
+ 'lines': [
+ ['lastoffset', 'last offset', 'absolute', 1, 1000],
+ ['rmsoffset', 'RMS offset', 'absolute', 1, 1000]
+ ]
+ },
+ 'stratum': {
+ 'options': [None, 'Chrony Stratum', 'stratum', 'root', 'chrony.stratum', 'line'],
+ 'lines': [
+ ['stratum', None, 'absolute', 1, 1]
+ ]
+ },
+ 'root': {
+ 'options': [None, 'Chrony Root Delays', 'milliseconds', 'root', 'chrony.root', 'line'],
+ 'lines': [
+ ['rootdelay', 'delay', 'absolute', 1, 1000000],
+ ['rootdispersion', 'dispersion', 'absolute', 1, 1000000]
+ ]
+ },
+ 'frequency': {
+ 'options': [None, 'Chrony Frequency', 'ppm', 'frequencies', 'chrony.frequency', 'area'],
+ 'lines': [
+ ['frequency', None, 'absolute', 1, 1000]
+ ]
+ },
+ 'residualfreq': {
+ 'options': [None, 'Chrony Residual frequency', 'ppm', 'frequencies', 'chrony.residualfreq', 'area'],
+ 'lines': [
+ ['residualfreq', 'residual frequency', 'absolute', 1, 1000]
+ ]
+ },
+ 'skew': {
+ 'options': [None, 'Chrony Skew, error bound on frequency', 'ppm', 'frequencies', 'chrony.skew', 'area'],
+ 'lines': [
+ ['skew', None, 'absolute', 1, 1000]
+ ]
+ }
+}
+
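+# Each entry below maps a label in `chronyc tracking` output to a chart
+# dimension id and a multiplier that converts the parsed value to an
+# integer. For example, 'Last offset : -0.000001214 seconds' becomes
+# data['lastoffset'] = -1214 (nanoseconds; the chart divisor of 1000
+# displays it as microseconds).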
+CHRONY = [
+ ('Frequency', 'frequency', 1e3),
+ ('Last offset', 'lastoffset', 1e9),
+ ('RMS offset', 'rmsoffset', 1e9),
+ ('Residual freq', 'residualfreq', 1e3),
+ ('Root delay', 'rootdelay', 1e9),
+ ('Root dispersion', 'rootdispersion', 1e9),
+ ('Skew', 'skew', 1e3),
+ ('Stratum', 'stratum', 1),
+ ('System time', 'timediff', 1e9)
+]
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(
+ self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = CHRONY_COMMAND
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ raw_data = (line.split(':', 1) for line in raw_data)
+ parsed, data = dict(), dict()
+
+ for line in raw_data:
+ try:
+ key, value = (l.strip() for l in line)
+ except ValueError:
+ continue
+ if value:
+ parsed[key] = value.split()[0]
+
+ for key, dim_id, multiplier in CHRONY:
+ try:
+ data[dim_id] = int(float(parsed[key]) * multiplier)
+ except (KeyError, ValueError):
+ continue
+
+ return data or None
diff --git a/collectors/python.d.plugin/chrony/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf
new file mode 100644
index 0000000..fd95519
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/chrony.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for chrony
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, chrony also supports the following:
+#
+#  command: 'chronyc -n tracking'   # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED chrony CONFIGURATION
+#
+# netdata will query chrony as the netdata user.
+# Verify that the netdata user is allowed to call 'chronyc tracking';
+# check the cmdallow directive in chrony.conf.
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'chronyc -n tracking'
diff --git a/collectors/python.d.plugin/couchdb/Makefile.inc b/collectors/python.d.plugin/couchdb/Makefile.inc
new file mode 100644
index 0000000..89dfb51
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += couchdb/couchdb.chart.py
+dist_pythonconfig_DATA += couchdb/couchdb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += couchdb/README.md couchdb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
new file mode 100644
index 0000000..2cc353e
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -0,0 +1,37 @@
+# couchdb
+
+This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
+
+* Overall server reads/writes
+* HTTP traffic breakdown
+ * Request methods (`GET`, `PUT`, `POST`, etc.)
+ * Response status codes (`200`, `201`, `4xx`, etc.)
+* Active server tasks
+* Replication status (CouchDB 2.1 and up only)
+* Erlang VM stats
+* Optional per-database statistics: sizes, # of docs, # of deleted docs
+
+### Configuration
+
+Sample for a local server running on port 5984:
+```yaml
+local:
+ user: 'admin'
+ pass: 'password'
+ node: 'couchdb@127.0.0.1'
+```
+
+Be sure to specify a correct admin-level username and password.
+
+You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
+
+If you want per-database statistics, these need to be added to the configuration, separated by spaces:
+```yaml
+local:
+ ...
+ databases: 'db1 db2 db3 ...'
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcouchdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/couchdb/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
new file mode 100644
index 0000000..a58694d
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
@@ -0,0 +1,400 @@
+# -*- coding: utf-8 -*-
+# Description: couchdb netdata python.d module
+# Author: wohali <wohali@apache.org>
+# Thanks to l2isbad for good examples :)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import namedtuple, defaultdict
+from json import loads
+from threading import Thread
+from socket import gethostbyname, gaierror
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+update_every = 1
+
+
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
+
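+# Metrics are addressed by dotted paths into the JSON that CouchDB returns;
+# _fetch_data() walks each path and strips a trailing '.value' when building
+# the dimension id (e.g. 'couchdb.database_reads.value' becomes
+# 'couchdb_database_reads').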
+OVERVIEW_STATS = [
+ 'couchdb.database_reads.value',
+ 'couchdb.database_writes.value',
+ 'couchdb.httpd.view_reads.value',
+ 'couchdb.httpd_request_methods.COPY.value',
+ 'couchdb.httpd_request_methods.DELETE.value',
+ 'couchdb.httpd_request_methods.GET.value',
+ 'couchdb.httpd_request_methods.HEAD.value',
+ 'couchdb.httpd_request_methods.OPTIONS.value',
+ 'couchdb.httpd_request_methods.POST.value',
+ 'couchdb.httpd_request_methods.PUT.value',
+ 'couchdb.httpd_status_codes.200.value',
+ 'couchdb.httpd_status_codes.201.value',
+ 'couchdb.httpd_status_codes.202.value',
+ 'couchdb.httpd_status_codes.204.value',
+ 'couchdb.httpd_status_codes.206.value',
+ 'couchdb.httpd_status_codes.301.value',
+ 'couchdb.httpd_status_codes.302.value',
+ 'couchdb.httpd_status_codes.304.value',
+ 'couchdb.httpd_status_codes.400.value',
+ 'couchdb.httpd_status_codes.401.value',
+ 'couchdb.httpd_status_codes.403.value',
+ 'couchdb.httpd_status_codes.404.value',
+ 'couchdb.httpd_status_codes.405.value',
+ 'couchdb.httpd_status_codes.406.value',
+ 'couchdb.httpd_status_codes.409.value',
+ 'couchdb.httpd_status_codes.412.value',
+ 'couchdb.httpd_status_codes.413.value',
+ 'couchdb.httpd_status_codes.414.value',
+ 'couchdb.httpd_status_codes.415.value',
+ 'couchdb.httpd_status_codes.416.value',
+ 'couchdb.httpd_status_codes.417.value',
+ 'couchdb.httpd_status_codes.500.value',
+ 'couchdb.httpd_status_codes.501.value',
+ 'couchdb.open_os_files.value',
+ 'couch_replicator.jobs.running.value',
+ 'couch_replicator.jobs.pending.value',
+ 'couch_replicator.jobs.crashed.value',
+]
+
+SYSTEM_STATS = [
+ 'context_switches',
+ 'run_queue',
+ 'ets_table_count',
+ 'reductions',
+ 'memory.atom',
+ 'memory.atom_used',
+ 'memory.binary',
+ 'memory.code',
+ 'memory.ets',
+ 'memory.other',
+ 'memory.processes',
+ 'io_input',
+ 'io_output',
+ 'os_proc_count',
+ 'process_count',
+ 'internal_replication_jobs'
+]
+
+DB_STATS = [
+ 'doc_count',
+ 'doc_del_count',
+ 'sizes.file',
+ 'sizes.external',
+ 'sizes.active'
+]
+
+ORDER = [
+ 'activity',
+ 'request_methods',
+ 'response_codes',
+ 'active_tasks',
+ 'replicator_jobs',
+ 'open_files',
+ 'db_sizes_file',
+ 'db_sizes_external',
+ 'db_sizes_active',
+ 'db_doc_counts',
+ 'db_doc_del_counts',
+ 'erlang_memory',
+ 'erlang_proc_counts',
+ 'erlang_peak_msg_queue',
+ 'erlang_reductions'
+]
+
+CHARTS = {
+ 'activity': {
+ 'options': [None, 'Overall Activity', 'requests/s',
+ 'dbactivity', 'couchdb.activity', 'stacked'],
+ 'lines': [
+ ['couchdb_database_reads', 'DB reads', 'incremental'],
+ ['couchdb_database_writes', 'DB writes', 'incremental'],
+ ['couchdb_httpd_view_reads', 'View reads', 'incremental']
+ ]
+ },
+ 'request_methods': {
+ 'options': [None, 'HTTP request methods', 'requests/s',
+ 'httptraffic', 'couchdb.request_methods',
+ 'stacked'],
+ 'lines': [
+ ['couchdb_httpd_request_methods_COPY', 'COPY', 'incremental'],
+ ['couchdb_httpd_request_methods_DELETE', 'DELETE', 'incremental'],
+ ['couchdb_httpd_request_methods_GET', 'GET', 'incremental'],
+ ['couchdb_httpd_request_methods_HEAD', 'HEAD', 'incremental'],
+ ['couchdb_httpd_request_methods_OPTIONS', 'OPTIONS',
+ 'incremental'],
+ ['couchdb_httpd_request_methods_POST', 'POST', 'incremental'],
+ ['couchdb_httpd_request_methods_PUT', 'PUT', 'incremental']
+ ]
+ },
+ 'response_codes': {
+ 'options': [None, 'HTTP response status codes', 'responses/s',
+ 'httptraffic', 'couchdb.response_codes',
+ 'stacked'],
+ 'lines': [
+ ['couchdb_httpd_status_codes_200', '200 OK', 'incremental'],
+ ['couchdb_httpd_status_codes_201', '201 Created', 'incremental'],
+ ['couchdb_httpd_status_codes_202', '202 Accepted', 'incremental'],
+ ['couchdb_httpd_status_codes_2xx', 'Other 2xx Success',
+ 'incremental'],
+ ['couchdb_httpd_status_codes_3xx', '3xx Redirection',
+ 'incremental'],
+ ['couchdb_httpd_status_codes_4xx', '4xx Client error',
+ 'incremental'],
+ ['couchdb_httpd_status_codes_5xx', '5xx Server error',
+ 'incremental']
+ ]
+ },
+ 'open_files': {
+ 'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'],
+ 'lines': [
+ ['couchdb_open_os_files', '# files', 'absolute']
+ ]
+ },
+ 'active_tasks': {
+ 'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'],
+ 'lines': [
+ ['activetasks_indexer', 'Indexer', 'absolute'],
+ ['activetasks_database_compaction', 'DB Compaction', 'absolute'],
+ ['activetasks_replication', 'Replication', 'absolute'],
+ ['activetasks_view_compaction', 'View Compaction', 'absolute']
+ ]
+ },
+ 'replicator_jobs': {
+ 'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 'couchdb.replicator_jobs', 'stacked'],
+ 'lines': [
+ ['couch_replicator_jobs_running', 'Running', 'absolute'],
+ ['couch_replicator_jobs_pending', 'Pending', 'absolute'],
+ ['couch_replicator_jobs_crashed', 'Crashed', 'absolute'],
+ ['internal_replication_jobs', 'Internal replication jobs',
+ 'absolute']
+ ]
+ },
+ 'erlang_memory': {
+ 'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
+ 'lines': [
+ ['memory_atom', 'atom', 'absolute'],
+ ['memory_binary', 'binaries', 'absolute'],
+ ['memory_code', 'code', 'absolute'],
+ ['memory_ets', 'ets', 'absolute'],
+ ['memory_processes', 'procs', 'absolute'],
+ ['memory_other', 'other', 'absolute']
+ ]
+ },
+ 'erlang_reductions': {
+ 'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'],
+ 'lines': [
+ ['reductions', 'reductions', 'incremental']
+ ]
+ },
+ 'erlang_proc_counts': {
+ 'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'],
+ 'lines': [
+ ['os_proc_count', 'OS procs', 'absolute'],
+ ['process_count', 'erl procs', 'absolute']
+ ]
+ },
+ 'erlang_peak_msg_queue': {
+ 'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue',
+ 'line'],
+ 'lines': [
+ ['peak_msg_queue', 'peak size', 'absolute']
+ ]
+ },
+ # Lines for the following are added as part of check()
+ 'db_sizes_file': {
+ 'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'],
+ 'lines': []
+ },
+ 'db_sizes_external': {
+ 'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'],
+ 'lines': []
+ },
+ 'db_sizes_active': {
+ 'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'],
+ 'lines': []
+ },
+ 'db_doc_counts': {
+ 'options': [None, 'Database # of docs', 'docs',
+ 'perdbstats', 'couchdb_db_doc_count', 'line'],
+ 'lines': []
+ },
+ 'db_doc_del_counts': {
+ 'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
+ 'lines': []
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 5984)
+ self.node = self.configuration.get('node', 'couchdb@127.0.0.1')
+ self.scheme = self.configuration.get('scheme', 'http')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ try:
+ self.dbs = self.configuration.get('databases').split(' ')
+ except (KeyError, AttributeError):
+ self.dbs = list()
+
+ def check(self):
+ if not (self.host and self.port):
+            self.error('Host or port is not defined in the module configuration file')
+ return False
+ try:
+ self.host = gethostbyname(self.host)
+ except gaierror as error:
+ self.error(str(error))
+ return False
+ self.url = '{scheme}://{host}:{port}'.format(scheme=self.scheme,
+ host=self.host,
+ port=self.port)
+ stats = self.url + '/_node/{node}/_stats'.format(node=self.node)
+ active_tasks = self.url + '/_active_tasks'
+ system = self.url + '/_node/{node}/_system'.format(node=self.node)
+ self.methods = [METHODS(get_data=self._get_overview_stats,
+ url=stats,
+ stats=OVERVIEW_STATS),
+ METHODS(get_data=self._get_active_tasks_stats,
+ url=active_tasks,
+ stats=None),
+ METHODS(get_data=self._get_overview_stats,
+ url=system,
+ stats=SYSTEM_STATS),
+ METHODS(get_data=self._get_dbs_stats,
+ url=self.url,
+ stats=DB_STATS)]
+ # must initialise manager before using _get_raw_data
+ self._manager = self._build_manager()
+ self.dbs = [db for db in self.dbs
+ if self._get_raw_data(self.url + '/' + db)]
+ for db in self.dbs:
+ self.definitions['db_sizes_file']['lines'].append(
+                ['db_'+db+'_sizes_file', db, 'absolute', 1, 1024]
+ )
+ self.definitions['db_sizes_external']['lines'].append(
+                ['db_'+db+'_sizes_external', db, 'absolute', 1, 1024]
+ )
+ self.definitions['db_sizes_active']['lines'].append(
+                ['db_'+db+'_sizes_active', db, 'absolute', 1, 1024]
+ )
+ self.definitions['db_doc_counts']['lines'].append(
+ ['db_'+db+'_doc_count', db, 'absolute']
+ )
+ self.definitions['db_doc_del_counts']['lines'].append(
+ ['db_'+db+'_doc_del_count', db, 'absolute']
+ )
+ return UrlService.check(self)
+
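+    # Each METHODS entry is fetched in its own thread; the per-endpoint
+    # results are merged into a single dict for netdata.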
+ def _get_data(self):
+ threads = list()
+ queue = Queue()
+ result = dict()
+
+ for method in self.methods:
+ th = Thread(target=method.get_data,
+ args=(queue, method.url, method.stats))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ # self.info('couchdb result = ' + str(result))
+ return result or None
+
+ def _get_overview_stats(self, queue, url, stats):
+ raw_data = self._get_raw_data(url)
+ if not raw_data:
+ return queue.put(dict())
+ data = loads(raw_data)
+ to_netdata = self._fetch_data(raw_data=data, metrics=stats)
+ if 'message_queues' in data:
+ to_netdata['peak_msg_queue'] = get_peak_msg_queue(data)
+ return queue.put(to_netdata)
+
+ def _get_active_tasks_stats(self, queue, url, _):
+ taskdict = defaultdict(int)
+ taskdict["activetasks_indexer"] = 0
+ taskdict["activetasks_database_compaction"] = 0
+ taskdict["activetasks_replication"] = 0
+ taskdict["activetasks_view_compaction"] = 0
+ raw_data = self._get_raw_data(url)
+ if not raw_data:
+ return queue.put(dict())
+ data = loads(raw_data)
+ for task in data:
+ taskdict["activetasks_" + task["type"]] += 1
+ return queue.put(dict(taskdict))
+
+ def _get_dbs_stats(self, queue, url, stats):
+ to_netdata = {}
+ for db in self.dbs:
+ raw_data = self._get_raw_data(url + '/' + db)
+ if not raw_data:
+ continue
+ data = loads(raw_data)
+ for metric in stats:
+ value = data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError as e:
+ self.debug('cannot process ' + metric + ' for ' + db
+ + ": " + str(e))
+ continue
+ metric_name = 'db_{0}_{1}'.format(db, '_'.join(metrics_list))
+ to_netdata[metric_name] = value
+ return queue.put(to_netdata)
+
+ def _fetch_data(self, raw_data, metrics):
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError as e:
+ self.debug('cannot process ' + metric + ': ' + str(e))
+ continue
+ # strip off .value from end of stat
+ if metrics_list[-1] == 'value':
+ metrics_list = metrics_list[:-1]
+ # sum up 3xx/4xx/5xx
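+            # e.g. 'couchdb.httpd_status_codes.404.value' is folded into
+            # 'couchdb_httpd_status_codes_4xx' and summed with the other
+            # 4xx codes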
+ if metrics_list[0:2] == ['couchdb', 'httpd_status_codes'] and \
+ int(metrics_list[2]) > 202:
+ metrics_list[2] = '{0}xx'.format(int(metrics_list[2]) // 100)
+ if '_'.join(metrics_list) in data:
+ data['_'.join(metrics_list)] += value
+ else:
+ data['_'.join(metrics_list)] = value
+ else:
+ data['_'.join(metrics_list)] = value
+ return data
+
+
+def get_peak_msg_queue(data):
+ maxsize = 0
+ queues = data['message_queues']
+ for queue in iter(queues.values()):
+ if isinstance(queue, dict) and 'count' in queue:
+ value = queue['count']
+ elif isinstance(queue, int):
+ value = queue
+ else:
+ continue
+ maxsize = max(maxsize, value)
+ return maxsize
diff --git a/collectors/python.d.plugin/couchdb/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf
new file mode 100644
index 0000000..9c68be7
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/couchdb.conf
@@ -0,0 +1,89 @@
+# netdata python.d.plugin configuration for couchdb
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# By default, CouchDB only updates its stats every 10 seconds.
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the couchdb plugin also supports the following:
+#
+# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1
+# port: 'port' # CouchDB port. Default: 5984
+# scheme: 'scheme' # http or https. Default: http
+# node: 'couchdb@127.0.0.1' # CouchDB node name. Same as -name vm.args argument.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# if db-specific stats are desired, place their names in databases:
+# databases: 'npm-registry animaldb'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: '5984'
+ node: 'couchdb@127.0.0.1'
+ scheme: 'http'
+# user: 'admin'
+# pass: 'password'
diff --git a/collectors/python.d.plugin/cpufreq/Makefile.inc b/collectors/python.d.plugin/cpufreq/Makefile.inc
new file mode 100644
index 0000000..d613880
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += cpufreq/cpufreq.chart.py
+dist_pythonconfig_DATA += cpufreq/cpufreq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpufreq/README.md cpufreq/Makefile.inc
+
diff --git a/collectors/python.d.plugin/cpufreq/README.md b/collectors/python.d.plugin/cpufreq/README.md
new file mode 100644
index 0000000..f1fc1e8
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/README.md
@@ -0,0 +1,37 @@
+# cpufreq
+
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT
+
+---
+
+This module shows the current CPU frequency as set by the cpufreq kernel
+module.
+
+**Requirement:**
+You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
+enabled in your kernel.
+
+This module tries to read from one of two possible locations. On
+initialization, it tries to read the `time_in_state` files provided by
+cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it
+falls back to using the more inaccurate `scaling_cur_freq` file (which only
+represents the **current** CPU frequency, and doesn't account for any state
+changes which happen between updates).
+
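+Each line of `time_in_state` pairs a frequency (in kHz) with the cumulative
+time spent at that frequency, in units of 10 ms. An abbreviated, illustrative
+example:
+
+```
+1800000 59892
+1200000 10471
+800000 401233
+```
+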
+It produces one chart with multiple lines (one line per core).
+
+### configuration
+
+Sample:
+
+```yaml
+sys_dir: "/sys/devices"
+```
+
+If no configuration is given, the module will search for cpufreq files in the `/sys/devices` directory.
+The directory is also prefixed with `NETDATA_HOST_PREFIX`, if specified.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcpufreq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/cpufreq/cpufreq.chart.py b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
new file mode 100644
index 0000000..cbbab6d
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Description: cpufreq netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Steven Noonan (tycho)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import glob
+import os
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+
+ORDER = ['cpufreq']
+
+CHARTS = {
+ 'cpufreq': {
+ 'options': [None, 'CPU Clock', 'MHz', 'cpufreq', 'cpufreq.cpufreq', 'line'],
+ 'lines': [
+ # lines are created dynamically in `check()` method
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ prefix = os.getenv('NETDATA_HOST_PREFIX', "")
+ if prefix.endswith('/'):
+ prefix = prefix[:-1]
+ self.sys_dir = prefix + "/sys/devices"
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.fake_name = 'cpu'
+ self.assignment = {}
+ self.accurate_exists = True
+ self.accurate_last = {}
+
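+    # time_in_state lists cumulative ticks spent at each frequency, so the
+    # average frequency over the last interval is the tick-weighted mean of
+    # the per-frequency deltas:
+    #   avg_freq = sum(freq * delta_ticks) / sum(delta_ticks)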
+ def _get_data(self):
+ data = {}
+
+ if self.accurate_exists:
+ accurate_ok = True
+
+ for name, paths in self.assignment.items():
+ last = self.accurate_last[name]
+
+ current = {}
+ deltas = {}
+ ticks_since_last = 0
+
+ for line in open(paths['accurate'], 'r'):
+ line = list(map(int, line.split()))
+ current[line[0]] = line[1]
+ ticks = line[1] - last.get(line[0], 0)
+ ticks_since_last += ticks
+ deltas[line[0]] = line[1] - last.get(line[0], 0)
+
+ avg_freq = 0
+ if ticks_since_last != 0:
+ for frequency, ticks in deltas.items():
+ avg_freq += frequency * ticks
+ avg_freq /= ticks_since_last
+
+ data[name] = avg_freq
+ self.accurate_last[name] = current
+ if avg_freq == 0 or ticks_since_last == 0:
+ # Delta is either too large or nonexistent, fall back to
+ # less accurate reading. This can happen if we switch
+ # to/from the 'schedutil' governor, which doesn't report
+ # stats.
+ accurate_ok = False
+
+ if accurate_ok:
+ return data
+
+ for name, paths in self.assignment.items():
+ data[name] = open(paths['inaccurate'], 'r').read()
+
+ return data
+
+ def check(self):
+ try:
+ self.sys_dir = str(self.configuration['sys_dir'])
+ except (KeyError, TypeError):
+ self.error("No path specified. Using: '" + self.sys_dir + "'")
+
+ for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/stats/time_in_state'):
+ path_elem = path.split('/')
+ cpu = path_elem[-4]
+ if cpu not in self.assignment:
+ self.assignment[cpu] = {}
+ self.assignment[cpu]['accurate'] = path
+ self.accurate_last[cpu] = {}
+
+ if not self.assignment:
+ self.accurate_exists = False
+
+ for path in glob.glob(self.sys_dir + '/system/cpu/cpu*/cpufreq/scaling_cur_freq'):
+ path_elem = path.split('/')
+ cpu = path_elem[-3]
+ if cpu not in self.assignment:
+ self.assignment[cpu] = {}
+ self.assignment[cpu]['inaccurate'] = path
+
+ if not self.assignment:
+ self.error("couldn't find a method to read cpufreq statistics")
+ return False
+
+ for name in sorted(self.assignment, key=lambda v: int(v[3:])):
+ self.definitions[ORDER[0]]['lines'].append([name, name, 'absolute', 1, 1000])
+
+ return True
diff --git a/collectors/python.d.plugin/cpufreq/cpufreq.conf b/collectors/python.d.plugin/cpufreq/cpufreq.conf
new file mode 100644
index 0000000..96c0884
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.conf
@@ -0,0 +1,41 @@
+# netdata python.d.plugin configuration for cpufreq
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# The directory to search for the file scaling_cur_freq
+sys_dir: "/sys/devices"
diff --git a/collectors/python.d.plugin/cpuidle/Makefile.inc b/collectors/python.d.plugin/cpuidle/Makefile.inc
new file mode 100644
index 0000000..66c47d3
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += cpuidle/cpuidle.chart.py
+dist_pythonconfig_DATA += cpuidle/cpuidle.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += cpuidle/README.md cpuidle/Makefile.inc
+
diff --git a/collectors/python.d.plugin/cpuidle/README.md b/collectors/python.d.plugin/cpuidle/README.md
new file mode 100644
index 0000000..bb6722a
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/README.md
@@ -0,0 +1,13 @@
+# cpuidle
+
+This module monitors the usage of CPU idle states.
+
+**Requirement:**
+Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
+
+It produces one stacked chart per CPU, showing the percentage of time spent in
+each state.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fcpuidle%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.chart.py b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
new file mode 100644
index 0000000..feac025
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+# Description: cpuidle netdata python.d module
+# Author: Steven Noonan (tycho)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import ctypes
+import glob
+import os
+import platform
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+syscall = ctypes.CDLL('libc.so.6').syscall
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ prefix = os.getenv('NETDATA_HOST_PREFIX', "")
+ if prefix.endswith('/'):
+ prefix = prefix[:-1]
+ self.sys_dir = prefix + "/sys/devices/system/cpu"
+ self.schedstat_path = prefix + "/proc/schedstat"
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = []
+ self.definitions = {}
+ self.fake_name = 'cpu'
+ self.assignment = {}
+ self.last_schedstat = None
+
+ @staticmethod
+ def __gettid():
+ # This is horrendous. We need the *thread id* (not the *process id*),
+ # but there's no Python standard library way of doing that. If you need
+ # to enable this module on a non-x86 machine type, you'll have to find
+ # the Linux syscall number for gettid() and add it to the dictionary
+ # below.
+ syscalls = {
+ 'i386': 224,
+ 'x86_64': 186,
+ }
+ if platform.machine() not in syscalls:
+ return None
+ tid = syscall(syscalls[platform.machine()])
+ return tid
+
+ def __wake_cpus(self, cpus):
+ # Requires Python 3.3+. This will "tickle" each CPU to force it to
+ # update its idle counters.
+ if hasattr(os, 'sched_setaffinity'):
+ pid = self.__gettid()
+ save_affinity = os.sched_getaffinity(pid)
+ for idx in cpus:
+ os.sched_setaffinity(pid, [idx])
+ os.sched_getaffinity(pid)
+ os.sched_setaffinity(pid, save_affinity)
+
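+    # In /proc/schedstat, field 7 of a 'cpu' line (index 7 after split) is
+    # the cumulative time, in nanoseconds, that tasks have spent running on
+    # that CPU; it is converted to microseconds here.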
+ def __read_schedstat(self):
+ cpus = {}
+ for line in open(self.schedstat_path, 'r'):
+ if not line.startswith('cpu'):
+ continue
+ line = line.rstrip().split()
+ cpu = line[0]
+ active_time = line[7]
+ cpus[cpu] = int(active_time) // 1000
+ return cpus
+
+ def _get_data(self):
+ results = {}
+
+ # Use the kernel scheduler stats to determine how much time was spent
+ # in C0 (active).
+ schedstat = self.__read_schedstat()
+
+ # Determine if any of the CPUs are idle. If they are, then we need to
+ # tickle them in order to update their C-state residency statistics.
+ if self.last_schedstat is None:
+ needs_tickle = list(self.assignment.keys())
+ else:
+ needs_tickle = []
+ for cpu, active_time in self.last_schedstat.items():
+ delta = schedstat[cpu] - active_time
+ if delta < 1:
+ needs_tickle.append(cpu)
+
+ if needs_tickle:
+ # This line is critical for the stats to update. If we don't "tickle"
+ # idle CPUs, then the counters for those CPUs stop counting.
+ self.__wake_cpus([int(cpu[3:]) for cpu in needs_tickle])
+
+ # Re-read schedstat now that we've tickled any idlers.
+ schedstat = self.__read_schedstat()
+
+ self.last_schedstat = schedstat
+
+ for cpu, metrics in self.assignment.items():
+ update_time = schedstat[cpu]
+ results[cpu + '_active_time'] = update_time
+
+ for metric, path in metrics.items():
+ residency = int(open(path, 'r').read())
+ results[metric] = residency
+
+ return results
+
+ def check(self):
+ if self.__gettid() is None:
+ self.error('Cannot get thread ID. Stats would be completely broken.')
+ return False
+
+ for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
+ # ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name']
+ path_elem = path.split('/')
+ cpu = path_elem[-4]
+ state = path_elem[-2]
+ statename = open(path, 'rt').read().rstrip()
+
+ orderid = '%s_cpuidle' % (cpu,)
+ if orderid not in self.definitions:
+ self.order.append(orderid)
+ active_name = '%s_active_time' % (cpu,)
+ self.definitions[orderid] = {
+ 'options': [None, 'C-state residency', 'time%', 'cpuidle', 'cpuidle.cpuidle', 'stacked'],
+ 'lines': [
+ [active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1],
+ ],
+ }
+ self.assignment[cpu] = {}
+
+ defid = '%s_%s_time' % (orderid, state)
+
+ self.definitions[orderid]['lines'].append(
+ [defid, statename, 'percentage-of-incremental-row', 1, 1]
+ )
+
+ self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time'])
+
+ # Sort order by kernel-specified CPU index
+ self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
+
+ if not self.definitions:
+ self.error("couldn't find cstate stats")
+ return False
+
+ return True
diff --git a/collectors/python.d.plugin/cpuidle/cpuidle.conf b/collectors/python.d.plugin/cpuidle/cpuidle.conf
new file mode 100644
index 0000000..25f5fed
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.conf
@@ -0,0 +1,38 @@
+# netdata python.d.plugin configuration for cpuidle
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
diff --git a/collectors/python.d.plugin/dns_query_time/Makefile.inc b/collectors/python.d.plugin/dns_query_time/Makefile.inc
new file mode 100644
index 0000000..7eca3e0
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dns_query_time/dns_query_time.chart.py
+dist_pythonconfig_DATA += dns_query_time/dns_query_time.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dns_query_time/README.md dns_query_time/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
new file mode 100644
index 0000000..73d70d3
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -0,0 +1,12 @@
+# dns_query_time
+
+This module provides DNS query time statistics.
+
+**Requirement:**
+* `python-dnspython` package
+
+It produces one aggregate chart or one chart per DNS server, showing the query time.
+
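+### configuration
+
+A sample job definition; the server and domain lists below are illustrative
+(see `dns_query_time.conf` for all supported options):
+
+```yaml
+my_job:
+  dns_servers: '8.8.8.8 8.8.4.4'
+  domains: 'example.com example.org'
+  aggregate: yes
+  response_timeout: 4
+```
+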
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdns_query_time%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
new file mode 100644
index 0000000..4a5e0e1
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+# Description: dns_query_time netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from random import choice
+from socket import getaddrinfo, gaierror
+from threading import Thread
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+try:
+    import dns.exception
+    import dns.message
+    import dns.name
+    import dns.query
+    import dns.rdatatype
+    DNS_PYTHON = True
+except ImportError:
+    DNS_PYTHON = False
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+update_every = 5
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.timeout = self.configuration.get('response_timeout', 4)
+ self.aggregate = self.configuration.get('aggregate', True)
+ self.domains = self.configuration.get('domains')
+ self.server_list = self.configuration.get('dns_servers')
+
+ def check(self):
+ if not DNS_PYTHON:
+ self.error("'python-dnspython' package is needed to use dns_query_time.chart.py")
+ return False
+
+ self.timeout = self.timeout if isinstance(self.timeout, int) else 4
+
+ if not all([self.domains, self.server_list,
+ isinstance(self.server_list, str), isinstance(self.domains, str)]):
+ self.error("server_list and domain_list can't be empty")
+ return False
+ else:
+ self.domains, self.server_list = self.domains.split(), self.server_list.split()
+
+        # iterate over a copy, since bad servers are removed from the list
+        for ns in list(self.server_list):
+            if not check_ns(ns):
+                self.info('Bad NS: %s' % ns)
+                self.server_list.remove(ns)
+ if not self.server_list:
+ return False
+
+ data = self._get_data(timeout=1)
+
+ down_servers = [s for s in data if data[s] == -100]
+ for down in down_servers:
+ down = down[3:].replace('_', '.')
+            self.info('Removed due to no response: %s' % down)
+ self.server_list.remove(down)
+ if not self.server_list:
+ return False
+
+ self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list)
+ return True
+
+ def _get_data(self, timeout=None):
+ return dns_request(self.server_list, timeout or self.timeout, self.domains)
+
+
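+# Query a randomly chosen domain against every server in parallel.
+# A timed-out query is reported as -100, which check() uses to drop
+# unresponsive servers.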
+def dns_request(server_list, timeout, domains):
+ threads = list()
+ que = Queue()
+ result = dict()
+
+ def dns_req(ns, t, q):
+ domain = dns.name.from_text(choice(domains))
+ request = dns.message.make_query(domain, dns.rdatatype.A)
+
+ try:
+ dns_start = time()
+ dns.query.udp(request, ns, timeout=t)
+ dns_end = time()
+ query_time = round((dns_end - dns_start) * 1000)
+ q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time})
+ except dns.exception.Timeout:
+ q.put({'_'.join(['ns', ns.replace('.', '_')]): -100})
+
+ for server in server_list:
+ th = Thread(target=dns_req, args=(server, timeout, que))
+ th.start()
+ threads.append(th)
+
+ for th in threads:
+ th.join()
+ result.update(que.get())
+
+ return result
+
+
+def check_ns(ns):
+ try:
+ return getaddrinfo(ns, 'domain')[0][4][0]
+ except gaierror:
+ return False
+
+
+def create_charts(aggregate, server_list):
+ if aggregate:
+ order = ['dns_group']
+ definitions = {
+ 'dns_group': {
+ 'options': [None, 'DNS Response Time', 'ms', 'name servers', 'dns_query_time.response_time', 'line'],
+ 'lines': []
+ }
+ }
+ for ns in server_list:
+ dim = [
+ '_'.join(['ns', ns.replace('.', '_')]),
+ ns,
+ 'absolute',
+ ]
+ definitions['dns_group']['lines'].append(dim)
+
+ return order, definitions
+ else:
+ order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
+ definitions = dict()
+
+ for ns in server_list:
+ definitions[''.join(['dns_', ns.replace('.', '_')])] = {
+ 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
+ 'lines': [
+ [
+ '_'.join(['ns', ns.replace('.', '_')]),
+ ns,
+ 'absolute',
+ ]
+ ]
+ }
+ return order, definitions
diff --git a/collectors/python.d.plugin/dns_query_time/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
new file mode 100644
index 0000000..9c0838e
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
@@ -0,0 +1,69 @@
+# netdata python.d.plugin configuration for dns_query_time
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, dns_query_time also supports the following:
+#
+# dns_servers: 'dns servers' # List of dns servers to query
+# domains: 'domains' # List of domains
+# aggregate: yes/no # Aggregate all servers in one chart or not
+#  response_timeout: 4      # DNS query response timeout (a query is reported as -100 if the response takes longer than response_timeout)
+#
+# ----------------------------------------------------------------------
\ No newline at end of file
diff --git a/collectors/python.d.plugin/dnsdist/Makefile.inc b/collectors/python.d.plugin/dnsdist/Makefile.inc
new file mode 100644
index 0000000..a53f518
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dnsdist/dnsdist.chart.py
+dist_pythonconfig_DATA += dnsdist/dnsdist.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dnsdist/README.md dnsdist/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
new file mode 100644
index 0000000..c7647a1
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -0,0 +1,56 @@
+# dnsdist
+
+This module monitors dnsdist performance and health metrics.
+
+The following charts are drawn:
+
+1. **Response latency**
+ * latency-slow
+ * latency100-1000
+ * latency50-100
+ * latency10-50
+ * latency1-10
+ * latency0-1
+
+2. **Cache performance**
+ * cache-hits
+ * cache-misses
+
+3. **ACL events**
+ * acl-drops
+ * rule-drop
+ * rule-nxdomain
+ * rule-refused
+
+4. **Noncompliant data**
+ * empty-queries
+ * no-policy
+ * noncompliant-queries
+ * noncompliant-responses
+
+5. **Queries**
+ * queries
+ * rdqueries
+    * empty-queries
+
+6. **Health**
+ * downstream-send-errors
+ * downstream-timeouts
+ * servfail-responses
+ * trunc-failures
+
+### configuration
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:5053/jsonstat?command=stats'
+ user : 'username'
+ pass : 'password'
+ header:
+ X-API-Key: 'dnsdist-api-key'
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdnsdist%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
new file mode 100644
index 0000000..d608586
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'queries',
+ 'queries_dropped',
+ 'packets_dropped',
+ 'answers',
+ 'backend_responses',
+ 'backend_commerrors',
+ 'backend_errors',
+ 'cache',
+ 'servercpu',
+ 'servermem',
+ 'query_latency',
+ 'query_latency_avg'
+]
+
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
+ 'lines': [
+ ['queries', 'all', 'incremental'],
+ ['rdqueries', 'recursive', 'incremental'],
+ ['empty-queries', 'empty', 'incremental']
+ ]
+ },
+ 'queries_dropped': {
+ 'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'],
+ 'lines': [
+ ['rule-drop', 'rule drop', 'incremental'],
+ ['dyn-blocked', 'dynamic block', 'incremental'],
+ ['no-policy', 'no policy', 'incremental'],
+ ['noncompliant-queries', 'non compliant', 'incremental']
+ ]
+ },
+ 'packets_dropped': {
+ 'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'],
+ 'lines': [
+ ['acl-drops', 'acl', 'incremental']
+ ]
+ },
+ 'answers': {
+ 'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'],
+ 'lines': [
+ ['self-answered', 'self answered', 'incremental'],
+ ['rule-nxdomain', 'nxdomain', 'incremental', -1],
+ ['rule-refused', 'refused', 'incremental', -1],
+ ['trunc-failures', 'trunc failures', 'incremental', -1]
+ ]
+ },
+ 'backend_responses': {
+ 'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'],
+ 'lines': [
+ ['responses', 'responses', 'incremental']
+ ]
+ },
+ 'backend_commerrors': {
+        'options': [None, 'Backend communication errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'],
+ 'lines': [
+ ['downstream-send-errors', 'send errors', 'incremental']
+ ]
+ },
+ 'backend_errors': {
+ 'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'],
+ 'lines': [
+ ['downstream-timeouts', 'timeout', 'incremental'],
+ ['servfail-responses', 'servfail', 'incremental'],
+ ['noncompliant-responses', 'non compliant', 'incremental']
+ ]
+ },
+ 'cache': {
+ 'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'],
+ 'lines': [
+ ['cache-hits', 'hits', 'incremental'],
+ ['cache-misses', 'misses', 'incremental', -1]
+ ]
+ },
+ 'servercpu': {
+ 'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'],
+ 'lines': [
+ ['cpu-sys-msec', 'system state', 'incremental'],
+ ['cpu-user-msec', 'user state', 'incremental']
+ ]
+ },
+ 'servermem': {
+ 'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
+ 'lines': [
+ ['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'query_latency': {
+ 'options': [None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'],
+ 'lines': [
+ ['latency0-1', '1ms', 'incremental'],
+ ['latency1-10', '10ms', 'incremental'],
+ ['latency10-50', '50ms', 'incremental'],
+ ['latency50-100', '100ms', 'incremental'],
+ ['latency100-1000', '1sec', 'incremental'],
+ ['latency-slow', 'slow', 'incremental']
+ ]
+ },
+ 'query_latency_avg': {
+ 'options': [None, 'Average latency for the last N queries', 'ms/query', 'latency',
+ 'dnsdist.query_latency_avg', 'line'],
+ 'lines': [
+ ['latency-avg100', '100', 'absolute'],
+ ['latency-avg1000', '1k', 'absolute'],
+ ['latency-avg10000', '10k', 'absolute'],
+ ['latency-avg1000000', '1000k', 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
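+    # The 'jsonstat?command=stats' endpoint returns a flat JSON object whose
+    # keys (e.g. 'queries', 'cache-hits') match the dimension ids in CHARTS,
+    # so the decoded dict can be returned directly.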
+ def _get_data(self):
+ data = self._get_raw_data()
+ if not data:
+ return None
+
+ return loads(data)
diff --git a/collectors/python.d.plugin/dnsdist/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf
new file mode 100644
index 0000000..324d65a
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.conf
@@ -0,0 +1,83 @@
+# netdata python.d.plugin configuration for dnsdist
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 1
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+#
+# In addition to the above, dnsdist also supports the following:
+#
+# url: 'URL' # the URL to fetch dnsdist performance statistics
+# user: 'username' # username for basic auth
+# pass: 'password' # password for basic auth
+# header:
+# X-API-Key: 'Key' # API key
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+# localhost:
+# name : 'local'
+# url : 'http://127.0.0.1:5053/jsonstat?command=stats'
+# user : 'username'
+# pass : 'password'
+# header:
+# X-API-Key: 'dnsdist-api-key'
+
+
diff --git a/collectors/python.d.plugin/dockerd/Makefile.inc b/collectors/python.d.plugin/dockerd/Makefile.inc
new file mode 100644
index 0000000..b100bc6
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dockerd/dockerd.chart.py
+dist_pythonconfig_DATA += dockerd/dockerd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dockerd/README.md dockerd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
new file mode 100644
index 0000000..b09a5d5
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -0,0 +1,28 @@
+# dockerd
+
+This module monitors Docker health metrics.
+
+**Requirement:**
+* `docker` package, version 3.2.0 or newer
+
+The following charts are drawn:
+
+1. **running containers**
+ * count
+
+2. **healthy containers**
+ * count
+
+3. **unhealthy containers**
+ * count
+
+### configuration
+
+```yaml
+update_every : 1
+priority : 60000
+```
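+
+By default the module talks to the Docker daemon over the local UNIX socket. If your daemon listens elsewhere, the `url` option (documented in `dockerd.conf` below) can point the module at it; a minimal sketch:
+
+```yaml
+local:
+  url: 'unix://var/run/docker.sock'
+```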
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdockerd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
new file mode 100644
index 0000000..8bd45df
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+# Description: docker netdata python.d module
+# Author: Kévin Darcel (@tuxity)
+
+try:
+ import docker
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+from distutils.version import StrictVersion
+
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'running_containers',
+ 'healthy_containers',
+ 'unhealthy_containers'
+]
+
+CHARTS = {
+ 'running_containers': {
+ 'options': [None, 'Number of running containers', 'containers', 'running containers',
+ 'docker.running_containers', 'line'],
+ 'lines': [
+ ['running_containers', 'running']
+ ]
+ },
+ 'healthy_containers': {
+ 'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
+ 'docker.healthy_containers', 'line'],
+ 'lines': [
+ ['healthy_containers', 'healthy']
+ ]
+ },
+ 'unhealthy_containers': {
+ 'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
+ 'docker.unhealthy_containers', 'line'],
+ 'lines': [
+ ['unhealthy_containers', 'unhealthy']
+ ]
+ }
+}
+
+
+MIN_REQUIRED_VERSION = '3.2.0'
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.client = None
+
+ def check(self):
+ if not HAS_DOCKER:
+ self.error("'docker' package is needed to use dockerd module")
+ return False
+
+ if StrictVersion(docker.__version__) < StrictVersion(MIN_REQUIRED_VERSION):
+ self.error("installed 'docker' package version {0}, minimum required version {1}, please upgrade".format(
+ docker.__version__,
+ MIN_REQUIRED_VERSION,
+ ))
+ return False
+
+ self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
+
+ try:
+ self.client.ping()
+ except docker.errors.APIError as error:
+ self.error(error)
+ return False
+
+ return True
+
+ def get_data(self):
+ data = dict()
+
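+        # sparse=True (added in docker 3.2.0, hence MIN_REQUIRED_VERSION)
+        # returns lightweight container objects without an extra inspect
+        # API call per container.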
+ data['running_containers'] = len(self.client.containers.list(sparse=True))
+ data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
+ data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))
+
+ return data or None
diff --git a/collectors/python.d.plugin/dockerd/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf
new file mode 100644
index 0000000..96c8ee0
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/dockerd.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for dockerd health data API
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the dockerd plugin also supports the following:
+#
+#  url: 'unix://var/run/docker.sock'   # URL of the Docker daemon socket to query
+#                                      # (a 'tcp://<host>:<port>' URL also works)
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ url: 'unix://var/run/docker.sock'
diff --git a/collectors/python.d.plugin/dovecot/Makefile.inc b/collectors/python.d.plugin/dovecot/Makefile.inc
new file mode 100644
index 0000000..fd7d13b
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dovecot/dovecot.chart.py
+dist_pythonconfig_DATA += dovecot/dovecot.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dovecot/README.md dovecot/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
new file mode 100644
index 0000000..de8788b
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -0,0 +1,79 @@
+# dovecot
+
+This module provides statistics from the Dovecot server.
+
+Statistics are taken from the Dovecot stats socket by executing the `EXPORT global` command.
+More information about Dovecot stats can be found on the [project wiki page](http://wiki2.dovecot.org/Statistics).
+
+The module isn't compatible with the new stats API (v2.3), but you can still use it with Dovecot v2.3
+by following the [upgrade steps](https://wiki2.dovecot.org/Upgrading/2.3).
+
+**Requirement:**
+A Dovecot UNIX socket with R/W permissions for the netdata user, or Dovecot configured with a TCP/IP socket.
+
+The module provides the following charts:
+
+1. **sessions**
+ * active sessions
+
+2. **logins**
+ * logins
+
+3. **commands** - number of IMAP commands
+ * commands
+
+4. **Faults**
+ * minor
+ * major
+
+5. **Context Switches**
+ * voluntary
+ * involuntary
+
+6. **disk** in bytes/s
+ * read
+ * write
+
+7. **bytes** in bytes/s
+ * read
+ * write
+
+8. **number of syscalls** in syscalls/s
+ * read
+ * write
+
+9. **lookups** - number of lookups per second
+ * path
+ * attr
+
+10. **hits** - number of cache hits
+ * hits
+
+11. **attempts** - authorization attempts
+ * success
+ * failure
+
+12. **cache** - cached authorization hits
+ * hit
+ * miss
+
+### configuration
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+```
+
+If no configuration is given, the module will attempt to connect to Dovecot via the UNIX socket at `/var/run/dovecot/stats`.
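+
+With Dovecot v2.3 the old-stats plugin exposes the socket at a different path; a sketch matching the `localsocket_old` job shipped in `dovecot.conf`:
+
+```yaml
+localsocket_old:
+  name   : 'local'
+  socket : '/var/run/dovecot/old-stats'
+```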
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fdovecot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
new file mode 100644
index 0000000..be1fa53
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Description: dovecot netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+
+UNIX_SOCKET = '/var/run/dovecot/stats'
+
+
+ORDER = [
+ 'sessions',
+ 'logins',
+ 'commands',
+ 'faults',
+ 'context_switches',
+ 'io',
+ 'net',
+ 'syscalls',
+ 'lookup',
+ 'cache',
+ 'auth',
+ 'auth_cache'
+]
+
+CHARTS = {
+ 'sessions': {
+ 'options': [None, 'Dovecot Active Sessions', 'number', 'sessions', 'dovecot.sessions', 'line'],
+ 'lines': [
+ ['num_connected_sessions', 'active sessions', 'absolute']
+ ]
+ },
+ 'logins': {
+ 'options': [None, 'Dovecot Logins', 'number', 'logins', 'dovecot.logins', 'line'],
+ 'lines': [
+ ['num_logins', 'logins', 'absolute']
+ ]
+ },
+ 'commands': {
+ 'options': [None, 'Dovecot Commands', 'commands', 'commands', 'dovecot.commands', 'line'],
+ 'lines': [
+ ['num_cmds', 'commands', 'absolute']
+ ]
+ },
+ 'faults': {
+ 'options': [None, 'Dovecot Page Faults', 'faults', 'page faults', 'dovecot.faults', 'line'],
+ 'lines': [
+ ['min_faults', 'minor', 'absolute'],
+ ['maj_faults', 'major', 'absolute']
+ ]
+ },
+ 'context_switches': {
+ 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches', 'line'],
+ 'lines': [
+ ['vol_cs', 'voluntary', 'absolute'],
+ ['invol_cs', 'involuntary', 'absolute']
+ ]
+ },
+ 'io': {
+ 'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'],
+ 'lines': [
+ ['disk_input', 'read', 'incremental', 1, 1024],
+ ['disk_output', 'write', 'incremental', -1, 1024]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
+ 'lines': [
+ ['read_bytes', 'read', 'incremental', 8, 1000],
+ ['write_bytes', 'write', 'incremental', -8, 1000]
+ ]
+ },
+ 'syscalls': {
+ 'options': [None, 'Dovecot Number of SysCalls', 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
+ 'lines': [
+ ['read_count', 'read', 'incremental'],
+ ['write_count', 'write', 'incremental']
+ ]
+ },
+ 'lookup': {
+ 'options': [None, 'Dovecot Lookups', 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
+ 'lines': [
+ ['mail_lookup_path', 'path', 'incremental'],
+ ['mail_lookup_attr', 'attr', 'incremental']
+ ]
+ },
+ 'cache': {
+ 'options': [None, 'Dovecot Cache Hits', 'hits/s', 'cache', 'dovecot.cache', 'line'],
+ 'lines': [
+ ['mail_cache_hits', 'hits', 'incremental']
+ ]
+ },
+ 'auth': {
+ 'options': [None, 'Dovecot Authentications', 'attempts', 'logins', 'dovecot.auth', 'stacked'],
+ 'lines': [
+ ['auth_successes', 'ok', 'absolute'],
+ ['auth_failures', 'failed', 'absolute']
+ ]
+ },
+ 'auth_cache': {
+ 'options': [None, 'Dovecot Authentication Cache', 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
+ 'lines': [
+ ['auth_cache_hits', 'hit', 'absolute'],
+ ['auth_cache_misses', 'miss', 'absolute']
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = None # localhost
+ self.port = None # 24242
+ self.unix_socket = UNIX_SOCKET
+ self.request = 'EXPORT\tglobal\r\n'
+
+ def _get_data(self):
+ """
+ Format data received from socket
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()
+ except (ValueError, AttributeError):
+ return None
+
+ if raw is None:
+ self.debug('dovecot returned no data')
+ return None
+
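+        # The EXPORT reply is tab-separated: a header line of field
+        # names followed by a line of values; only these two lines
+        # are needed for the global stats.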
+ data = raw.split('\n')[:2]
+ desc = data[0].split('\t')
+ vals = data[1].split('\t')
+ ret = dict()
+ for i, _ in enumerate(desc):
+ try:
+ ret[str(desc[i])] = int(vals[i])
+ except ValueError:
+ continue
+ return ret or None
diff --git a/collectors/python.d.plugin/dovecot/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf
new file mode 100644
index 0000000..451dbc9
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/dovecot.conf
@@ -0,0 +1,98 @@
+# netdata python.d.plugin configuration for dovecot
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, dovecot also supports the following:
+#
+# socket: 'path/to/dovecot/stats'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 24242
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+
+localsocket_old:
+ name : 'local'
+ socket : '/var/run/dovecot/old-stats'
+
diff --git a/collectors/python.d.plugin/elasticsearch/Makefile.inc b/collectors/python.d.plugin/elasticsearch/Makefile.inc
new file mode 100644
index 0000000..15c63c2
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += elasticsearch/elasticsearch.chart.py
+dist_pythonconfig_DATA += elasticsearch/elasticsearch.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += elasticsearch/README.md elasticsearch/Makefile.inc
+
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
new file mode 100644
index 0000000..6d25b02
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -0,0 +1,62 @@
+# elasticsearch
+
+This module monitors Elasticsearch performance and health metrics.
+
+It produces:
+
+1. **Search performance** charts:
+ * Number of queries, fetches
+ * Time spent on queries, fetches
+ * Query and fetch latency
+
+2. **Indexing performance** charts:
+ * Number of documents indexed, index refreshes, flushes
+ * Time spent on indexing, refreshing, flushing
+ * Indexing and flushing latency
+
+3. **Memory usage and garbage collection** charts:
+ * JVM heap currently in use, committed
+ * Count of garbage collections
+ * Time spent on garbage collections
+
+4. **Host metrics** charts:
+ * Available file descriptors in percent
+ * Opened HTTP connections
+ * Cluster communication transport metrics
+
+5. **Queues and rejections** charts:
+ * Number of queued/rejected threads in thread pool
+
+6. **Fielddata cache** charts:
+ * Fielddata cache size
+ * Fielddata evictions and circuit breaker tripped count
+
+7. **Cluster health API** charts:
+ * Cluster status
+ * Nodes and tasks statistics
+ * Shards statistics
+
+8. **Cluster stats API** charts:
+ * Nodes statistics
+ * Query cache statistics
+ * Docs statistics
+ * Store statistics
+ * Indices and shards statistics
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+  host : 'ipaddress' # Elasticsearch server IP address or hostname
+  port : 'port' # Port on which elasticsearch listens
+  cluster_health : True/False # Call the cluster health Elasticsearch API. Enabled by default.
+  cluster_stats : True/False # Call the cluster stats Elasticsearch API. Enabled by default.
+```
+
+If no configuration is given, the module will fail to run.
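+
+For example, to skip the cluster-wide API calls on a busy node (both are enabled by default), a sketch:
+
+```yaml
+local:
+  host           : '127.0.0.1'
+  port           : '9200'
+  cluster_health : False
+  cluster_stats  : False
+```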
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Felasticsearch%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
new file mode 100644
index 0000000..f1ea03f
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
@@ -0,0 +1,667 @@
+# -*- coding: utf-8 -*-
+# Description: elastic search node stats netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+import threading
+
+from collections import namedtuple
+from socket import gethostbyname, gaierror
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'run'])
+
+NODE_STATS = [
+ 'indices.search.fetch_current',
+ 'indices.search.fetch_total',
+ 'indices.search.query_current',
+ 'indices.search.query_total',
+ 'indices.search.query_time_in_millis',
+ 'indices.search.fetch_time_in_millis',
+ 'indices.indexing.index_total',
+ 'indices.indexing.index_current',
+ 'indices.indexing.index_time_in_millis',
+ 'indices.refresh.total',
+ 'indices.refresh.total_time_in_millis',
+ 'indices.flush.total',
+ 'indices.flush.total_time_in_millis',
+ 'indices.translog.operations',
+ 'indices.translog.size_in_bytes',
+ 'indices.translog.uncommitted_operations',
+ 'indices.translog.uncommitted_size_in_bytes',
+ 'indices.segments.count',
+ 'indices.segments.terms_memory_in_bytes',
+ 'indices.segments.stored_fields_memory_in_bytes',
+ 'indices.segments.term_vectors_memory_in_bytes',
+ 'indices.segments.norms_memory_in_bytes',
+ 'indices.segments.points_memory_in_bytes',
+ 'indices.segments.doc_values_memory_in_bytes',
+ 'indices.segments.index_writer_memory_in_bytes',
+ 'indices.segments.version_map_memory_in_bytes',
+ 'indices.segments.fixed_bit_set_memory_in_bytes',
+ 'jvm.gc.collectors.young.collection_count',
+ 'jvm.gc.collectors.old.collection_count',
+ 'jvm.gc.collectors.young.collection_time_in_millis',
+ 'jvm.gc.collectors.old.collection_time_in_millis',
+ 'jvm.mem.heap_used_percent',
+ 'jvm.mem.heap_used_in_bytes',
+ 'jvm.mem.heap_committed_in_bytes',
+ 'jvm.buffer_pools.direct.count',
+ 'jvm.buffer_pools.direct.used_in_bytes',
+ 'jvm.buffer_pools.direct.total_capacity_in_bytes',
+ 'jvm.buffer_pools.mapped.count',
+ 'jvm.buffer_pools.mapped.used_in_bytes',
+ 'jvm.buffer_pools.mapped.total_capacity_in_bytes',
+ 'thread_pool.bulk.queue',
+ 'thread_pool.bulk.rejected',
+ 'thread_pool.write.queue',
+ 'thread_pool.write.rejected',
+ 'thread_pool.index.queue',
+ 'thread_pool.index.rejected',
+ 'thread_pool.search.queue',
+ 'thread_pool.search.rejected',
+ 'thread_pool.merge.queue',
+ 'thread_pool.merge.rejected',
+ 'indices.fielddata.memory_size_in_bytes',
+ 'indices.fielddata.evictions',
+ 'breakers.fielddata.tripped',
+ 'http.current_open',
+ 'transport.rx_size_in_bytes',
+ 'transport.tx_size_in_bytes',
+ 'process.max_file_descriptors',
+ 'process.open_file_descriptors'
+]
+
+CLUSTER_STATS = [
+ 'nodes.count.data_only',
+ 'nodes.count.master_data',
+ 'nodes.count.total',
+ 'nodes.count.master_only',
+ 'nodes.count.client',
+ 'indices.docs.count',
+ 'indices.query_cache.hit_count',
+ 'indices.query_cache.miss_count',
+ 'indices.store.size_in_bytes',
+ 'indices.count',
+ 'indices.shards.total'
+]
+
+HEALTH_STATS = [
+ 'number_of_nodes',
+ 'number_of_data_nodes',
+ 'number_of_pending_tasks',
+ 'number_of_in_flight_fetch',
+ 'active_shards',
+ 'relocating_shards',
+ 'unassigned_shards',
+ 'delayed_unassigned_shards',
+ 'initializing_shards',
+ 'active_shards_percent_as_number'
+]
+
+LATENCY = {
+ 'query_latency': {
+ 'total': 'indices_search_query_total',
+ 'spent_time': 'indices_search_query_time_in_millis'
+ },
+ 'fetch_latency': {
+ 'total': 'indices_search_fetch_total',
+ 'spent_time': 'indices_search_fetch_time_in_millis'
+ },
+ 'indexing_latency': {
+ 'total': 'indices_indexing_index_total',
+ 'spent_time': 'indices_indexing_index_time_in_millis'
+ },
+ 'flushing_latency': {
+ 'total': 'indices_flush_total',
+ 'spent_time': 'indices_flush_total_time_in_millis'
+ }
+}
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'search_performance_total',
+ 'search_performance_current',
+ 'search_performance_time',
+ 'search_latency',
+ 'index_performance_total',
+ 'index_performance_current',
+ 'index_performance_time',
+ 'index_latency',
+ 'index_translog_operations',
+ 'index_translog_size',
+ 'index_segments_count',
+ 'index_segments_memory_writer',
+ 'index_segments_memory',
+ 'jvm_mem_heap',
+ 'jvm_mem_heap_bytes',
+ 'jvm_buffer_pool_count',
+ 'jvm_direct_buffers_memory',
+ 'jvm_mapped_buffers_memory',
+ 'jvm_gc_count',
+ 'jvm_gc_time',
+ 'host_metrics_file_descriptors',
+ 'host_metrics_http',
+ 'host_metrics_transport',
+ 'thread_pool_queued',
+ 'thread_pool_rejected',
+ 'fielddata_cache',
+ 'fielddata_evictions_tripped',
+ 'cluster_health_status',
+ 'cluster_health_nodes',
+ 'cluster_health_pending_tasks',
+ 'cluster_health_flight_fetch',
+ 'cluster_health_shards',
+ 'cluster_stats_nodes',
+ 'cluster_stats_query_cache',
+ 'cluster_stats_docs',
+ 'cluster_stats_store',
+ 'cluster_stats_indices',
+ 'cluster_stats_shards_total',
+]
+
+CHARTS = {
+ 'search_performance_total': {
+ 'options': [None, 'Queries And Fetches', 'events/s', 'search performance',
+ 'elastic.search_performance_total', 'stacked'],
+ 'lines': [
+ ['indices_search_query_total', 'queries', 'incremental'],
+ ['indices_search_fetch_total', 'fetches', 'incremental']
+ ]
+ },
+ 'search_performance_current': {
+ 'options': [None, 'Queries and Fetches In Progress', 'events', 'search performance',
+ 'elastic.search_performance_current', 'stacked'],
+ 'lines': [
+ ['indices_search_query_current', 'queries', 'absolute'],
+ ['indices_search_fetch_current', 'fetches', 'absolute']
+ ]
+ },
+ 'search_performance_time': {
+ 'options': [None, 'Time Spent On Queries And Fetches', 'seconds', 'search performance',
+ 'elastic.search_performance_time', 'stacked'],
+ 'lines': [
+ ['indices_search_query_time_in_millis', 'query', 'incremental', 1, 1000],
+ ['indices_search_fetch_time_in_millis', 'fetch', 'incremental', 1, 1000]
+ ]
+ },
+ 'search_latency': {
+ 'options': [None, 'Query And Fetch Latency', 'milliseconds', 'search performance', 'elastic.search_latency', 'stacked'],
+ 'lines': [
+ ['query_latency', 'query', 'absolute', 1, 1000],
+ ['fetch_latency', 'fetch', 'absolute', 1, 1000]
+ ]
+ },
+ 'index_performance_total': {
+ 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'events/s',
+ 'indexing performance', 'elastic.index_performance_total', 'stacked'],
+ 'lines': [
+ ['indices_indexing_index_total', 'indexed', 'incremental'],
+ ['indices_refresh_total', 'refreshes', 'incremental'],
+ ['indices_flush_total', 'flushes', 'incremental']
+ ]
+ },
+ 'index_performance_current': {
+ 'options': [None, 'Number Of Documents Currently Being Indexed', 'currently indexed',
+ 'indexing performance', 'elastic.index_performance_current', 'stacked'],
+ 'lines': [
+ ['indices_indexing_index_current', 'documents', 'absolute']
+ ]
+ },
+ 'index_performance_time': {
+ 'options': [None, 'Time Spent On Indexing, Refreshing, Flushing', 'seconds', 'indexing performance',
+ 'elastic.index_performance_time', 'stacked'],
+ 'lines': [
+ ['indices_indexing_index_time_in_millis', 'indexing', 'incremental', 1, 1000],
+ ['indices_refresh_total_time_in_millis', 'refreshing', 'incremental', 1, 1000],
+ ['indices_flush_total_time_in_millis', 'flushing', 'incremental', 1, 1000]
+ ]
+ },
+ 'index_latency': {
+ 'options': [None, 'Indexing And Flushing Latency', 'milliseconds', 'indexing performance',
+ 'elastic.index_latency', 'stacked'],
+ 'lines': [
+ ['indexing_latency', 'indexing', 'absolute', 1, 1000],
+ ['flushing_latency', 'flushing', 'absolute', 1, 1000]
+ ]
+ },
+ 'index_translog_operations': {
+ 'options': [None, 'Translog Operations', 'operations', 'translog',
+ 'elastic.index_translog_operations', 'area'],
+ 'lines': [
+ ['indices_translog_operations', 'total', 'absolute'],
+            ['indices_translog_uncommitted_operations', 'uncommitted', 'absolute']
+ ]
+ },
+ 'index_translog_size': {
+ 'options': [None, 'Translog Size', 'MiB', 'translog',
+ 'elastic.index_translog_size', 'area'],
+ 'lines': [
+            ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048576],
+            ['indices_translog_uncommitted_size_in_bytes', 'uncommitted', 'absolute', 1, 1048576]
+ ]
+ },
+ 'index_segments_count': {
+ 'options': [None, 'Total Number Of Indices Segments', 'segments', 'indices segments',
+ 'elastic.index_segments_count', 'line'],
+ 'lines': [
+ ['indices_segments_count', 'segments', 'absolute']
+ ]
+ },
+ 'index_segments_memory_writer': {
+ 'options': [None, 'Index Writer Memory Usage', 'MiB', 'indices segments',
+ 'elastic.index_segments_memory_writer', 'area'],
+ 'lines': [
+            ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048576]
+ ]
+ },
+ 'index_segments_memory': {
+ 'options': [None, 'Indices Segments Memory Usage', 'MiB', 'indices segments',
+ 'elastic.index_segments_memory', 'stacked'],
+ 'lines': [
+            ['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048576],
+            ['indices_segments_stored_fields_memory_in_bytes', 'stored fields', 'absolute', 1, 1048576],
+            ['indices_segments_term_vectors_memory_in_bytes', 'term vectors', 'absolute', 1, 1048576],
+            ['indices_segments_norms_memory_in_bytes', 'norms', 'absolute', 1, 1048576],
+            ['indices_segments_points_memory_in_bytes', 'points', 'absolute', 1, 1048576],
+            ['indices_segments_doc_values_memory_in_bytes', 'doc values', 'absolute', 1, 1048576],
+            ['indices_segments_version_map_memory_in_bytes', 'version map', 'absolute', 1, 1048576],
+            ['indices_segments_fixed_bit_set_memory_in_bytes', 'fixed bit set', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_mem_heap': {
+ 'options': [None, 'JVM Heap Percentage Currently in Use', 'percentage', 'memory usage and gc',
+ 'elastic.jvm_heap', 'area'],
+ 'lines': [
+ ['jvm_mem_heap_used_percent', 'inuse', 'absolute']
+ ]
+ },
+ 'jvm_mem_heap_bytes': {
+ 'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc',
+ 'elastic.jvm_heap_bytes', 'area'],
+ 'lines': [
+            ['jvm_mem_heap_committed_in_bytes', 'committed', 'absolute', 1, 1048576],
+ ['jvm_mem_heap_used_in_bytes', 'used', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_buffer_pool_count': {
+ 'options': [None, 'JVM Buffers', 'pools', 'memory usage and gc',
+ 'elastic.jvm_buffer_pool_count', 'line'],
+ 'lines': [
+ ['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
+ ['jvm_buffer_pools_mapped_count', 'mapped', 'absolute']
+ ]
+ },
+ 'jvm_direct_buffers_memory': {
+ 'options': [None, 'JVM Direct Buffers Memory', 'MiB', 'memory usage and gc',
+ 'elastic.jvm_direct_buffers_memory', 'area'],
+ 'lines': [
+            ['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048576],
+            ['jvm_buffer_pools_direct_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_mapped_buffers_memory': {
+ 'options': [None, 'JVM Mapped Buffers Memory', 'MiB', 'memory usage and gc',
+ 'elastic.jvm_mapped_buffers_memory', 'area'],
+ 'lines': [
+            ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048576],
+            ['jvm_buffer_pools_mapped_total_capacity_in_bytes', 'total capacity', 'absolute', 1, 1048576]
+ ]
+ },
+ 'jvm_gc_count': {
+ 'options': [None, 'Garbage Collections', 'events/s', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
+ 'lines': [
+ ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
+ ['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
+ ]
+ },
+ 'jvm_gc_time': {
+ 'options': [None, 'Time Spent On Garbage Collections', 'milliseconds', 'memory usage and gc',
+ 'elastic.gc_time', 'stacked'],
+ 'lines': [
+ ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
+ ['jvm_gc_collectors_old_collection_time_in_millis', 'old', 'incremental']
+ ]
+ },
+ 'thread_pool_queued': {
+ 'options': [None, 'Number Of Queued Threads In Thread Pool', 'queued threads', 'queues and rejections',
+ 'elastic.thread_pool_queued', 'stacked'],
+ 'lines': [
+ ['thread_pool_bulk_queue', 'bulk', 'absolute'],
+ ['thread_pool_write_queue', 'write', 'absolute'],
+ ['thread_pool_index_queue', 'index', 'absolute'],
+ ['thread_pool_search_queue', 'search', 'absolute'],
+ ['thread_pool_merge_queue', 'merge', 'absolute']
+ ]
+ },
+ 'thread_pool_rejected': {
+ 'options': [None, 'Rejected Threads In Thread Pool', 'rejected threads', 'queues and rejections',
+ 'elastic.thread_pool_rejected', 'stacked'],
+ 'lines': [
+ ['thread_pool_bulk_rejected', 'bulk', 'absolute'],
+ ['thread_pool_write_rejected', 'write', 'absolute'],
+ ['thread_pool_index_rejected', 'index', 'absolute'],
+ ['thread_pool_search_rejected', 'search', 'absolute'],
+ ['thread_pool_merge_rejected', 'merge', 'absolute']
+ ]
+ },
+ 'fielddata_cache': {
+ 'options': [None, 'Fielddata Cache', 'MiB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
+ 'lines': [
+ ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
+ ]
+ },
+ 'fielddata_evictions_tripped': {
+ 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'events/s',
+ 'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
+ 'lines': [
+ ['indices_fielddata_evictions', 'evictions', 'incremental'],
+ ['indices_fielddata_tripped', 'tripped', 'incremental']
+ ]
+ },
+ 'cluster_health_nodes': {
+ 'options': [None, 'Nodes Statistics', 'nodes', 'cluster health API',
+ 'elastic.cluster_health_nodes', 'stacked'],
+ 'lines': [
+ ['number_of_nodes', 'nodes', 'absolute'],
+ ['number_of_data_nodes', 'data_nodes', 'absolute'],
+ ]
+ },
+ 'cluster_health_pending_tasks': {
+ 'options': [None, 'Tasks Statistics', 'tasks', 'cluster health API',
+ 'elastic.cluster_health_pending_tasks', 'line'],
+ 'lines': [
+ ['number_of_pending_tasks', 'pending_tasks', 'absolute'],
+ ]
+ },
+ 'cluster_health_flight_fetch': {
+ 'options': [None, 'In Flight Fetches Statistics', 'fetches', 'cluster health API',
+ 'elastic.cluster_health_flight_fetch', 'line'],
+ 'lines': [
+ ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
+ ]
+ },
+ 'cluster_health_status': {
+ 'options': [None, 'Cluster Status', 'status', 'cluster health API',
+ 'elastic.cluster_health_status', 'area'],
+ 'lines': [
+ ['status_green', 'green', 'absolute'],
+ ['status_red', 'red', 'absolute'],
+ ['status_foo1', None, 'absolute'],
+ ['status_foo2', None, 'absolute'],
+ ['status_foo3', None, 'absolute'],
+ ['status_yellow', 'yellow', 'absolute']
+ ]
+ },
+ 'cluster_health_shards': {
+ 'options': [None, 'Shards Statistics', 'shards', 'cluster health API',
+ 'elastic.cluster_health_shards', 'stacked'],
+ 'lines': [
+ ['active_shards', 'active_shards', 'absolute'],
+ ['relocating_shards', 'relocating_shards', 'absolute'],
+ ['unassigned_shards', 'unassigned', 'absolute'],
+ ['delayed_unassigned_shards', 'delayed_unassigned', 'absolute'],
+ ['initializing_shards', 'initializing', 'absolute'],
+ ['active_shards_percent_as_number', 'active_percent', 'absolute']
+ ]
+ },
+ 'cluster_stats_nodes': {
+ 'options': [None, 'Nodes Statistics', 'nodes', 'cluster stats API',
+ 'elastic.cluster_nodes', 'stacked'],
+ 'lines': [
+ ['nodes_count_data_only', 'data_only', 'absolute'],
+ ['nodes_count_master_data', 'master_data', 'absolute'],
+ ['nodes_count_total', 'total', 'absolute'],
+ ['nodes_count_master_only', 'master_only', 'absolute'],
+ ['nodes_count_client', 'client', 'absolute']
+ ]
+ },
+ 'cluster_stats_query_cache': {
+ 'options': [None, 'Query Cache Statistics', 'queries', 'cluster stats API',
+ 'elastic.cluster_query_cache', 'stacked'],
+ 'lines': [
+ ['indices_query_cache_hit_count', 'hit', 'incremental'],
+ ['indices_query_cache_miss_count', 'miss', 'incremental']
+ ]
+ },
+ 'cluster_stats_docs': {
+ 'options': [None, 'Docs Statistics', 'docs', 'cluster stats API',
+ 'elastic.cluster_docs', 'line'],
+ 'lines': [
+ ['indices_docs_count', 'docs', 'absolute']
+ ]
+ },
+ 'cluster_stats_store': {
+ 'options': [None, 'Store Statistics', 'MiB', 'cluster stats API',
+ 'elastic.cluster_store', 'line'],
+ 'lines': [
+            ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048576]
+ ]
+ },
+ 'cluster_stats_indices': {
+ 'options': [None, 'Indices Statistics', 'indices', 'cluster stats API',
+ 'elastic.cluster_indices', 'line'],
+ 'lines': [
+ ['indices_count', 'indices', 'absolute'],
+ ]
+ },
+ 'cluster_stats_shards_total': {
+ 'options': [None, 'Total Shards Statistics', 'shards', 'cluster stats API',
+ 'elastic.cluster_shards_total', 'line'],
+ 'lines': [
+ ['indices_shards_total', 'shards', 'absolute']
+ ]
+ },
+ 'host_metrics_transport': {
+ 'options': [None, 'Cluster Communication Transport Metrics', 'kilobit/s', 'host metrics',
+ 'elastic.host_transport', 'area'],
+ 'lines': [
+ ['transport_rx_size_in_bytes', 'in', 'incremental', 8, 1000],
+ ['transport_tx_size_in_bytes', 'out', 'incremental', -8, 1000]
+ ]
+ },
+ 'host_metrics_file_descriptors': {
+ 'options': [None, 'Available File Descriptors In Percent', 'percentage', 'host metrics',
+ 'elastic.host_descriptors', 'area'],
+ 'lines': [
+ ['file_descriptors_used', 'used', 'absolute', 1, 10]
+ ]
+ },
+ 'host_metrics_http': {
+ 'options': [None, 'Opened HTTP Connections', 'connections', 'host metrics',
+ 'elastic.host_http_connections', 'line'],
+ 'lines': [
+ ['http_current_open', 'opened', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host')
+ self.port = self.configuration.get('port', 9200)
+ self.url = '{scheme}://{host}:{port}'.format(
+ scheme=self.configuration.get('scheme', 'http'),
+ host=self.host,
+ port=self.port,
+ )
+ self.latency = dict()
+ self.methods = list()
+
+ def check(self):
+ if not all([self.host,
+ self.port,
+ isinstance(self.host, str),
+ isinstance(self.port, (str, int))]):
+            self.error('Host or port is not defined in the module configuration file')
+ return False
+
+ # Hostname -> ip address
+ try:
+ self.host = gethostbyname(self.host)
+ except gaierror as error:
+ self.error(str(error))
+ return False
+
+ # Create URL for every Elasticsearch API
+ self.methods = [METHODS(get_data=self._get_node_stats,
+ url=self.url + '/_nodes/_local/stats',
+ run=self.configuration.get('node_stats', True)),
+ METHODS(get_data=self._get_cluster_health,
+ url=self.url + '/_cluster/health',
+ run=self.configuration.get('cluster_health', True)),
+ METHODS(get_data=self._get_cluster_stats,
+ url=self.url + '/_cluster/stats',
+ run=self.configuration.get('cluster_stats', True))]
+
+ # Remove disabled API calls from 'avail methods'
+ return UrlService.check(self)
+
+ def _get_data(self):
+ threads = list()
+ queue = Queue()
+ result = dict()
+
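+        # Query every enabled API endpoint in its own thread; each
+        # worker puts its (possibly empty) result dict on the shared queue.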
+ for method in self.methods:
+ if not method.run:
+ continue
+ th = threading.Thread(target=method.get_data,
+ args=(queue, method.url))
+ th.start()
+ threads.append(th)
+
+ for thread in threads:
+ thread.join()
+ result.update(queue.get())
+
+ return result or None
+
+ def _get_cluster_health(self, queue, url):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data(url)
+
+ if not raw_data:
+ return queue.put(dict())
+
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
+ to_netdata = fetch_data_(raw_data=data,
+ metrics=HEALTH_STATS)
+
+ to_netdata.update({'status_green': 0, 'status_red': 0, 'status_yellow': 0,
+ 'status_foo1': 0, 'status_foo2': 0, 'status_foo3': 0})
+ current_status = 'status_' + data['status']
+ to_netdata[current_status] = 1
+
+ return queue.put(to_netdata)
+
+ def _get_cluster_stats(self, queue, url):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data(url)
+
+ if not raw_data:
+ return queue.put(dict())
+
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
+ to_netdata = fetch_data_(raw_data=data,
+ metrics=CLUSTER_STATS)
+
+ return queue.put(to_netdata)
+
+ def _get_node_stats(self, queue, url):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data(url)
+
+ if not raw_data:
+ return queue.put(dict())
+
+ data = self.json_reply(raw_data)
+
+ if not data:
+ return queue.put(dict())
+
+ node = list(data['nodes'].keys())[0]
+ to_netdata = fetch_data_(raw_data=data['nodes'][node],
+ metrics=NODE_STATS)
+
+ # Search, index, flush, fetch performance latency
+ for key in LATENCY:
+ try:
+ to_netdata[key] = self.find_avg(total=to_netdata[LATENCY[key]['total']],
+ spent_time=to_netdata[LATENCY[key]['spent_time']],
+ key=key)
+ except KeyError:
+ continue
+ if 'process_open_file_descriptors' in to_netdata and 'process_max_file_descriptors' in to_netdata:
+ to_netdata['file_descriptors_used'] = round(float(to_netdata['process_open_file_descriptors'])
+ / to_netdata['process_max_file_descriptors'] * 1000)
+
+ return queue.put(to_netdata)
+
+ def json_reply(self, reply):
+ try:
+ return json.loads(reply)
+ except ValueError as err:
+ self.error(err)
+ return None
+
+ def find_avg(self, total, spent_time, key):
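+        # Average latency since the previous poll: delta(spent_time) /
+        # delta(total) in ms per event, scaled by 1000 because the chart
+        # definitions divide these dimensions by 1000 again.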
+ if key not in self.latency:
+ self.latency[key] = dict(total=total,
+ spent_time=spent_time)
+ return 0
+ if self.latency[key]['total'] != total:
+ latency = float(spent_time - self.latency[key]['spent_time'])\
+ / float(total - self.latency[key]['total']) * 1000
+ self.latency[key]['total'] = total
+ self.latency[key]['spent_time'] = spent_time
+ return latency
+ self.latency[key]['spent_time'] = spent_time
+ return 0
+
+
+def fetch_data_(raw_data, metrics):
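+    # Resolve each dotted metric path, e.g. 'indices.search.query_total',
+    # in the nested reply and store the value under an underscore-joined
+    # flat key; metrics missing from the reply are silently skipped.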
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+ return data
diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
new file mode 100644
index 0000000..e5c97e7
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for elasticsearch stats
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the elasticsearch plugin also supports the following:
+#
+# host: 'ipaddress' # Server IP address or hostname.
+# port: 'port' # Port on which elasticsearch listens.
+# cluster_health: False/True # Call the cluster health Elasticsearch API. Enabled by default.
+# cluster_stats: False/True # Call the cluster stats Elasticsearch API. Enabled by default.
+#
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ host: '127.0.0.1'
+ port: '9200'
diff --git a/collectors/python.d.plugin/example/Makefile.inc b/collectors/python.d.plugin/example/Makefile.inc
new file mode 100644
index 0000000..1b027d5
--- /dev/null
+++ b/collectors/python.d.plugin/example/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += example/example.chart.py
+dist_pythonconfig_DATA += example/example.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += example/README.md example/Makefile.inc
+
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
new file mode 100644
index 0000000..d65f8cf
--- /dev/null
+++ b/collectors/python.d.plugin/example/README.md
@@ -0,0 +1,5 @@
+# example
+
+An example python data collection module.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexample%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
new file mode 100644
index 0000000..cc8c187
--- /dev/null
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Description: example netdata python.d module
+# Author: Put your name here (your github login)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from random import SystemRandom
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+priority = 90000
+
+ORDER = [
+ 'random',
+]
+
+CHARTS = {
+ 'random': {
+ 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
+ 'lines': [
+ ['random1']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.random = SystemRandom()
+
+ @staticmethod
+ def check():
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ for i in range(1, 4):
+ dimension_id = ''.join(['random', str(i)])
+
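+            # Dimensions can be created at runtime: register any new
+            # dimension with the chart before reporting its value.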
+ if dimension_id not in self.charts['random']:
+ self.charts['random'].add_dimension([dimension_id])
+
+ data[dimension_id] = self.random.randint(0, 100)
+
+ return data
diff --git a/collectors/python.d.plugin/example/example.conf b/collectors/python.d.plugin/example/example.conf
new file mode 100644
index 0000000..3d84351
--- /dev/null
+++ b/collectors/python.d.plugin/example/example.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for example
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, example also supports the following:
+#
+# - none
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/exim/Makefile.inc b/collectors/python.d.plugin/exim/Makefile.inc
new file mode 100644
index 0000000..36ffa56
--- /dev/null
+++ b/collectors/python.d.plugin/exim/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += exim/exim.chart.py
+dist_pythonconfig_DATA += exim/exim.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += exim/README.md exim/Makefile.inc
+
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
new file mode 100644
index 0000000..1cebb27
--- /dev/null
+++ b/collectors/python.d.plugin/exim/README.md
@@ -0,0 +1,15 @@
+# exim
+
+A simple module that executes `exim -bpc` to read the size of the Exim queue.
+The command can take a long time to finish, so running it every second is not recommended.
+
+It produces only one chart:
+
+1. **Exim Queue Emails**
+ * emails
+
+No module configuration is needed, but Exim itself must allow the netdata user to list the queue (see the snippet below).
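+
+By default Exim refuses queue listing for non-admin users. A sketch of the option to set in your main Exim configuration (commonly `/etc/exim/exim4.conf`, or `/etc/exim4/conf.d/main/000_local_options` on Debian-style layouts), as also noted in `exim.conf` below:
+
+```
+queue_list_requires_admin = false
+```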
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fexim%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
new file mode 100644
index 0000000..68b7b5c
--- /dev/null
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Description: exim netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+
+EXIM_COMMAND = 'exim -bpc'
+
+ORDER = [
+ 'qemails',
+]
+
+CHARTS = {
+ 'qemails': {
+ 'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'],
+ 'lines': [
+ ['emails', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = EXIM_COMMAND
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ try:
+ return {'emails': int(self._get_raw_data()[0])}
+ except (ValueError, AttributeError):
+ return None
diff --git a/collectors/python.d.plugin/exim/exim.conf b/collectors/python.d.plugin/exim/exim.conf
new file mode 100644
index 0000000..3b7e659
--- /dev/null
+++ b/collectors/python.d.plugin/exim/exim.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for exim
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# exim is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, exim also supports the following:
+#
+# command: 'exim -bpc' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED exim CONFIGURATION
+#
+# netdata will query exim as user netdata.
+# By default exim will refuse to respond.
+#
+# To allow querying exim as a non-admin user, please add the following
+# to your exim configuration:
+#
+# queue_list_requires_admin = false
+#
+# Your exim configuration should be in
+#
+# /etc/exim/exim4.conf
+# or
+# /etc/exim4/conf.d/main/000_local_options
+#
+# Please consult your distribution information to find the exact file.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'exim -bpc'
diff --git a/collectors/python.d.plugin/fail2ban/Makefile.inc b/collectors/python.d.plugin/fail2ban/Makefile.inc
new file mode 100644
index 0000000..31e117e
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += fail2ban/fail2ban.chart.py
+dist_pythonconfig_DATA += fail2ban/fail2ban.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += fail2ban/README.md fail2ban/Makefile.inc
+
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
new file mode 100644
index 0000000..2651198
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -0,0 +1,25 @@
+# fail2ban
+
+This module monitors the fail2ban log file to show all bans for all active jails.
+
+**Requirements:**
+ * fail2ban.log file MUST BE readable by netdata (a good idea is to add **create 0640 root netdata** to the fail2ban conf in logrotate.d)
+
+It produces two charts, each with one line per jail: the ban rate (bans/s) and the number of banned IPs (since the last restart of netdata).
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ log_path: '/var/log/fail2ban.log'
+ conf_path: '/etc/fail2ban/jail.local'
+ exclude: 'dropbear apache'
+```
+If no configuration is given, the module will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.
+If the conf file is not found, the default jail is `ssh`.
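+
+For reference, the module matches Ban/Unban events in the log with a regular expression; a minimal sketch of that matching (the pattern and sample line below are the ones used in `fail2ban.chart.py`):
+
+```python
+import re
+
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban|Restore Ban) (?P<ip>[a-f0-9.:]+)')
+
+line = '2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27'
+match = RE_DATA.search(line)
+print(match.groupdict())  # {'jail': 'ssh', 'action': 'Ban', 'ip': '217.59.246.27'}
+```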
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffail2ban%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
new file mode 100644
index 0000000..dfd2fea
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+# Description: fail2ban log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+import os
+
+from collections import defaultdict
+from glob import glob
+
+from bases.FrameworkServices.LogService import LogService
+
+
+ORDER = [
+ 'jails_bans',
+ 'jails_in_jail',
+]
+
+
+def charts(jails):
+ """
+ Chart definitions creating
+ """
+
+ ch = {
+ ORDER[0]: {
+ 'options': [None, 'Jails Ban Rate', 'bans/s', 'bans', 'jail.bans', 'line'],
+ 'lines': []
+ },
+ ORDER[1]: {
+ 'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs', 'in jail',
+ 'jail.in_jail', 'line'],
+ 'lines': []
+ },
+ }
+ for jail in jails:
+ dim = [
+ jail,
+ jail,
+ 'incremental',
+ ]
+ ch[ORDER[0]]['lines'].append(dim)
+
+ dim = [
+ '{0}_in_jail'.format(jail),
+ jail,
+ 'absolute',
+ ]
+ ch[ORDER[1]]['lines'].append(dim)
+
+ return ch
+
+
+RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= (true|false)')
+
+# Example:
+# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 195.201.88.33
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 217.59.246.27
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Restore Ban 217.59.246.27
+RE_DATA = re.compile(r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>Unban|Ban|Restore Ban) (?P<ip>[a-f0-9.:]+)')
+
+DEFAULT_JAILS = [
+ 'ssh',
+]
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ LogService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = dict()
+ self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
+ self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
+ self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
+ self.exclude = self.configuration.get('exclude', str())
+ self.monitoring_jails = list()
+ self.banned_ips = defaultdict(set)
+ self.data = dict()
+
+ def check(self):
+ """
+ :return: bool
+ """
+ if not self.conf_path.endswith(('.conf', '.local')):
+ self.error('{0} is not a valid conf path, must be *.conf or *.local'.format(self.conf_path))
+ return False
+
+ if not os.access(self.log_path, os.R_OK):
+ self.error('{0} is not readable'.format(self.log_path))
+ return False
+
+ if os.path.getsize(self.log_path) == 0:
+ self.error('{0} is empty'.format(self.log_path))
+ return False
+
+ self.monitoring_jails = self.jails_auto_detection()
+ for jail in self.monitoring_jails:
+ self.data[jail] = 0
+ self.data['{0}_in_jail'.format(jail)] = 0
+
+ self.definitions = charts(self.monitoring_jails)
+ self.info('monitoring jails: {0}'.format(self.monitoring_jails))
+
+ return True
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ raw = self._get_raw_data()
+
+ if not raw:
+ return None if raw is None else self.data
+
+ for row in raw:
+ match = RE_DATA.search(row)
+
+ if not match:
+ continue
+
+ match = match.groupdict()
+
+ if match['jail'] not in self.monitoring_jails:
+ continue
+
+ jail, action, ip = match['jail'], match['action'], match['ip']
+
+ if action == 'Ban' or action == 'Restore Ban':
+ self.data[jail] += 1
+ if ip not in self.banned_ips[jail]:
+ self.banned_ips[jail].add(ip)
+ self.data['{0}_in_jail'.format(jail)] += 1
+ else:
+ if ip in self.banned_ips[jail]:
+ self.banned_ips[jail].remove(ip)
+ self.data['{0}_in_jail'.format(jail)] -= 1
+
+ return self.data
+
+ def get_files_from_dir(self, dir_path, suffix):
+ """
+ :return: list
+ """
+ if not os.path.isdir(dir_path):
+ self.error('{0} is not a directory'.format(dir_path))
+ return list()
+
+ return glob('{0}/*.{1}'.format(dir_path, suffix))
+
+ def get_jails_from_file(self, file_path):
+ """
+ :return: list
+ """
+ if not os.access(file_path, os.R_OK):
+ self.error('{0} is not readable or does not exist'.format(file_path))
+ return list()
+
+ with open(file_path, 'rt') as f:
+ lines = f.readlines()
+ raw = ' '.join(line for line in lines if line.startswith(('[', 'enabled')))
+
+ match = RE_JAILS.findall(raw)
+ # Result: [('ssh', 'true'), ('dropbear', 'true'), ('pam-generic', 'true'), ...]
+
+ if not match:
+ self.debug('{0} parse failed'.format(file_path))
+ return list()
+
+ return match
+
+ def jails_auto_detection(self):
+ """
+ :return: list
+
+ Parses jail configuration files. Returns list of enabled jails.
+ According to man jail.conf, the parse order must be
+ * jail.conf
+ * jail.d/*.conf (in alphabetical order)
+ * jail.local
+ * jail.d/*.local (in alphabetical order)
+ """
+ jails_files, all_jails, active_jails = list(), list(), list()
+
+ jails_files.append('{0}.conf'.format(self.conf_path.rsplit('.', 1)[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'conf'))
+ jails_files.append('{0}.local'.format(self.conf_path.rsplit('.', 1)[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'local'))
+
+ self.debug('config files to parse: {0}'.format(jails_files))
+
+ for f in jails_files:
+ all_jails.extend(self.get_jails_from_file(f))
+
+ exclude = self.exclude.split()
+
+ for name, status in all_jails:
+ if name in exclude:
+ continue
+
+ if status == 'true' and name not in active_jails:
+ active_jails.append(name)
+ elif status == 'false' and name in active_jails:
+ active_jails.remove(name)
+
+ return active_jails or DEFAULT_JAILS
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
new file mode 100644
index 0000000..a36436b
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for fail2ban
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, fail2ban also supports the following:
+#
+# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log'
+# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local'
+# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/'
+# exclude: 'jails you want to exclude from autodetection' # Default: none
+# ------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/freeradius/Makefile.inc b/collectors/python.d.plugin/freeradius/Makefile.inc
new file mode 100644
index 0000000..54aa649
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += freeradius/freeradius.chart.py
+dist_pythonconfig_DATA += freeradius/freeradius.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += freeradius/README.md freeradius/Makefile.inc
+
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
new file mode 100644
index 0000000..00eb50d
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -0,0 +1,72 @@
+# freeradius
+
+This module uses the `radclient` command to collect FreeRADIUS statistics. It is not recommended to run it every second.
+
+It produces:
+
+1. **Authentication counters:**
+ * access-accepts
+ * access-rejects
+ * auth-dropped-requests
+ * auth-duplicate-requests
+ * auth-invalid-requests
+ * auth-malformed-requests
+ * auth-unknown-types
+
+2. **Accounting counters:** [optional]
+ * accounting-requests
+ * accounting-responses
+ * acct-dropped-requests
+ * acct-duplicate-requests
+ * acct-invalid-requests
+ * acct-malformed-requests
+ * acct-unknown-types
+
+3. **Proxy authentication counters:** [optional]
+ * proxy-access-accepts
+ * proxy-access-rejects
+ * proxy-auth-dropped-requests
+ * proxy-auth-duplicate-requests
+ * proxy-auth-invalid-requests
+ * proxy-auth-malformed-requests
+ * proxy-auth-unknown-types
+
+4. **Proxy accounting counters:** [optional]
+ * proxy-accounting-requests
+ * proxy-accounting-responses
+ * proxy-acct-dropped-requests
+ * proxy-acct-duplicate-requests
+ * proxy-acct-invalid-requests
+ * proxy-acct-malformed-requests
+ * proxy-acct-unknown-types
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ host : 'localhost'
+ port : '18121'
+ secret : 'adminsecret'
+ acct : False # Freeradius accounting statistics.
+ proxy_auth : False # Freeradius proxy authentication statistics.
+ proxy_acct : False # Freeradius proxy accounting statistics.
+```
+
+**Freeradius server configuration:**
+
+The configuration for the status server is automatically created in the sites-available directory.
+By default, the server is enabled and can be queried from every client.
+FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+
+To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+
+ * `cd sites-enabled`
+ * `ln -s ../sites-available/status status`
+
+and restart/reload your FreeRADIUS server.
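+
+Under the hood, the module pipes a status request into `radclient` (see `freeradius.chart.py`); a minimal sketch of the equivalent call, assuming the sample host, port and secret from the configuration above:
+
+```python
+from subprocess import PIPE, Popen
+
+RADIUS_MSG = ('Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, '
+              'Response-Packet-Type = Access-Accept')
+
+# echo "$RADIUS_MSG" | radclient -r 1 -t 1 -x localhost:18121 status adminsecret
+echo = Popen(['echo', RADIUS_MSG], stdout=PIPE)
+rad = Popen(['radclient', '-r', '1', '-t', '1', '-x', 'localhost:18121', 'status', 'adminsecret'],
+            stdin=echo.stdout, stdout=PIPE)
+echo.stdout.close()
+print(rad.communicate()[0].decode())
+```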
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ffreeradius%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/freeradius/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
new file mode 100644
index 0000000..8563660
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Description: freeradius netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+from subprocess import Popen, PIPE
+
+from bases.collection import find_binary
+from bases.FrameworkServices.SimpleService import SimpleService
+
+update_every = 15
+
+PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')
+
+RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
+
+RADCLIENT_RETRIES = 1
+RADCLIENT_TIMEOUT = 1
+
+DEFAULT_HOST = 'localhost'
+DEFAULT_PORT = 18121
+DEFAULT_DO_ACCT = False
+DEFAULT_DO_PROXY_AUTH = False
+DEFAULT_DO_PROXY_ACCT = False
+
+ORDER = [
+ 'authentication',
+ 'accounting',
+ 'proxy-auth',
+ 'proxy-acct',
+]
+
+CHARTS = {
+ 'authentication': {
+ 'options': [None, 'Authentication', 'packets/s', 'authentication', 'freerad.auth', 'line'],
+ 'lines': [
+ ['access-accepts', None, 'incremental'],
+ ['access-rejects', None, 'incremental'],
+ ['auth-dropped-requests', 'dropped-requests', 'incremental'],
+ ['auth-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['auth-invalid-requests', 'invalid-requests', 'incremental'],
+ ['auth-malformed-requests', 'malformed-requests', 'incremental'],
+ ['auth-unknown-types', 'unknown-types', 'incremental']
+ ]
+ },
+ 'accounting': {
+ 'options': [None, 'Accounting', 'packets/s', 'accounting', 'freerad.acct', 'line'],
+ 'lines': [
+ ['accounting-requests', 'requests', 'incremental'],
+ ['accounting-responses', 'responses', 'incremental'],
+ ['acct-dropped-requests', 'dropped-requests', 'incremental'],
+ ['acct-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['acct-invalid-requests', 'invalid-requests', 'incremental'],
+ ['acct-malformed-requests', 'malformed-requests', 'incremental'],
+ ['acct-unknown-types', 'unknown-types', 'incremental']
+ ]
+ },
+ 'proxy-auth': {
+ 'options': [None, 'Proxy Authentication', 'packets/s', 'authentication', 'freerad.proxy.auth', 'line'],
+ 'lines': [
+ ['proxy-access-accepts', 'access-accepts', 'incremental'],
+ ['proxy-access-rejects', 'access-rejects', 'incremental'],
+ ['proxy-auth-dropped-requests', 'dropped-requests', 'incremental'],
+ ['proxy-auth-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['proxy-auth-invalid-requests', 'invalid-requests', 'incremental'],
+ ['proxy-auth-malformed-requests', 'malformed-requests', 'incremental'],
+ ['proxy-auth-unknown-types', 'unknown-types', 'incremental']
+ ]
+ },
+ 'proxy-acct': {
+ 'options': [None, 'Proxy Accounting', 'packets/s', 'accounting', 'freerad.proxy.acct', 'line'],
+ 'lines': [
+ ['proxy-accounting-requests', 'requests', 'incremental'],
+ ['proxy-accounting-responses', 'responses', 'incremental'],
+ ['proxy-acct-dropped-requests', 'dropped-requests', 'incremental'],
+ ['proxy-acct-duplicate-requests', 'duplicate-requests', 'incremental'],
+ ['proxy-acct-invalid-requests', 'invalid-requests', 'incremental'],
+ ['proxy-acct-malformed-requests', 'malformed-requests', 'incremental'],
+ ['proxy-acct-unknown-types', 'unknown-types', 'incremental']
+ ]
+ }
+}
+
+
+def radclient_status(radclient, retries, timeout, host, port, secret):
+ # radclient -r 1 -t 1 -x 127.0.0.1:18121 status secret
+
+ return '{radclient} -r {num_retries} -t {timeout} -x {host}:{port} status {secret}'.format(
+ radclient=radclient,
+ num_retries=retries,
+ timeout=timeout,
+ host=host,
+ port=port,
+ secret=secret,
+ ).split()
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', DEFAULT_HOST)
+ self.port = self.configuration.get('port', DEFAULT_PORT)
+ self.secret = self.configuration.get('secret')
+ self.do_acct = self.configuration.get('acct', DEFAULT_DO_ACCT)
+ self.do_proxy_auth = self.configuration.get('proxy_auth', DEFAULT_DO_PROXY_AUTH)
+ self.do_proxy_acct = self.configuration.get('proxy_acct', DEFAULT_DO_PROXY_ACCT)
+ self.echo = find_binary('echo')
+ self.radclient = find_binary('radclient')
+ self.sub_echo = [self.echo, RADIUS_MSG]
+ self.sub_radclient = radclient_status(
+ self.radclient, RADCLIENT_RETRIES, RADCLIENT_TIMEOUT, self.host, self.port, self.secret,
+ )
+
+ def check(self):
+ if not self.radclient:
+ self.error("Can't locate 'radclient' binary or binary is not executable by netdata user")
+ return False
+
+ if not self.echo:
+ self.error("Can't locate 'echo' binary or binary is not executable by netdata user")
+ return False
+
+ if not self.secret:
+ self.error("'secret' isn't set")
+ return False
+
+ if not self.get_raw_data():
+ self.error('Request returned no data. Is server alive?')
+ return False
+
+ if not self.do_acct:
+ self.order.remove('accounting')
+
+ if not self.do_proxy_auth:
+ self.order.remove('proxy-auth')
+
+ if not self.do_proxy_acct:
+ self.order.remove('proxy-acct')
+
+ return True
+
+ def get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ result = self.get_raw_data()
+
+ if not result:
+ return None
+
+ return dict(
+ (key.lower(), value) for key, value in PARSER.findall(result)
+ )
+
+ def get_raw_data(self):
+ """
+ The following code is equivalent to
+ 'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
+ | radclient -t 1 -r 1 host:port status secret'
+ :return: str
+ """
+ try:
+ process_echo = Popen(self.sub_echo, stdout=PIPE, stderr=PIPE, shell=False)
+ process_rad = Popen(self.sub_radclient, stdin=process_echo.stdout, stdout=PIPE, stderr=PIPE, shell=False)
+ process_echo.stdout.close()
+ raw_result = process_rad.communicate()[0]
+ except OSError:
+ return None
+
+ if process_rad.returncode == 0:
+ return raw_result.decode()
+
+ return None
diff --git a/collectors/python.d.plugin/freeradius/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf
new file mode 100644
index 0000000..74b2737
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/freeradius.conf
@@ -0,0 +1,80 @@
+# netdata python.d.plugin configuration for freeradius
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, freeradius also supports the following:
+#
+# host: 'host' # Default: 'localhost'. Server ip address or hostname.
+# port: 'port' # Default: '18121'. Port on which the freeradius server listens (type = status).
+# secret: 'secret' # No default. The module will not start without it.
+# acct: yes/no # Default: no. Freeradius accounting statistics.
+# proxy_auth: yes/no # Default: no. Freeradius proxy authentication statistics.
+# proxy_acct: yes/no # Default: no. Freeradius proxy accounting statistics.
+#
+# ------------------------------------------------------------------------------------------------------------------
+# Freeradius server configuration:
+# The configuration for the status server is automatically created in the sites-available directory.
+# By default, the server is enabled and can be queried from every client.
+# FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+# To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+# cd sites-enabled
+# ln -s ../sites-available/status status
+# and restart/reload your FreeRADIUS server.
+# ------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/go_expvar/Makefile.inc b/collectors/python.d.plugin/go_expvar/Makefile.inc
new file mode 100644
index 0000000..74f50d7
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += go_expvar/go_expvar.chart.py
+dist_pythonconfig_DATA += go_expvar/go_expvar.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += go_expvar/README.md go_expvar/Makefile.inc
+
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
new file mode 100644
index 0000000..3942a7b
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -0,0 +1,277 @@
+# go_expvar
+
+The `go_expvar` module can monitor any Go application that exposes its metrics with the use of
+the `expvar` package from the Go standard library.
+
+`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts.
+
+For the memory statistics, it produces the following charts:
+
+1. **Heap allocations** in KiB
+ * alloc: size of objects allocated on the heap
+ * inuse: size of allocated heap spans
+
+2. **Stack allocations** in KiB
+ * inuse: size of allocated stack spans
+
+3. **MSpan allocations** in KiB
+ * inuse: size of allocated mspan structures
+
+4. **MCache allocations** in KiB
+ * inuse: size of allocated mcache structures
+
+5. **Virtual memory** in KiB
+ * sys: size of reserved virtual address space
+
+6. **Live objects**
+ * live: number of live objects in memory
+
+7. **GC pauses average** in ns
+ * avg: average duration of all GC stop-the-world pauses
+
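+The GC pauses average (chart 7) is computed from Go's `PauseNs` circular buffer of the last 256 pause durations; slots that are still zero (buffer not yet filled) are excluded. A minimal sketch of that calculation, mirroring `go_expvar.chart.py`:
+
+```python
+gc_pauses = [120000, 98000, 0, 0]  # PauseNs sample: zeros are unfilled slots
+
+try:
+    gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
+except ZeroDivisionError:  # no GC cycles have occurred yet
+    gc_pause_avg = 0
+
+print(gc_pause_avg)  # 109000.0
+```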
+
+## Monitoring Go Applications
+
+Netdata can be used to monitor running Go applications that expose their metrics with
+the use of the [expvar package](https://golang.org/pkg/expvar/) included in the Go standard library.
+
+The `expvar` package exposes these metrics over HTTP and is very easy to use.
+Consider this minimal sample below:
+
+```go
+package main
+
+import (
+ _ "expvar"
+ "net/http"
+)
+
+func main() {
+ http.ListenAndServe("127.0.0.1:8080", nil)
+}
+```
+
+When imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that
+exposes Go runtime's memory statistics in JSON format. You can inspect the output by opening
+the URL in your browser (or by using `wget` or `curl`).
+
+Sample output:
+
+```json
+{
+"cmdline": ["./expvar-demo-binary"],
+"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <ommited for brevity>}
+}
+```
+
+You can of course expose and monitor your own variables as well.
+Here is a sample Go application that exposes a few custom variables:
+
+```go
+package main
+
+import (
+ "expvar"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func main() {
+
+ tick := time.NewTicker(1 * time.Second)
+ num_go := expvar.NewInt("runtime.goroutines")
+ counters := expvar.NewMap("counters")
+ counters.Set("cnt1", new(expvar.Int))
+ counters.Set("cnt2", new(expvar.Float))
+
+ go http.ListenAndServe(":8080", nil)
+
+ for {
+ select {
+ case <- tick.C:
+ num_go.Set(int64(runtime.NumGoroutine()))
+ counters.Add("cnt1", 1)
+ counters.AddFloat("cnt2", 1.452)
+ }
+ }
+}
+```
+
+Apart from the runtime memory stats, this application publishes two counters and the
+number of currently running goroutines, and updates these stats every second.
+
+In the next section, we will cover how to monitor and chart these exposed stats with
+the use of netdata's `go_expvar` module.
+
+### Using netdata go_expvar module
+
+The `go_expvar` module is disabled by default. To enable it, edit [`python.d.conf`](../python.d.conf)
+(to edit it on your system, run `/etc/netdata/edit-config python.d.conf`), and change the `go_expvar`
+variable to `yes`:
+
+```
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+...
+go_expvar: yes
+...
+```
+
+Next, we need to edit the module configuration file, found at [`/etc/netdata/python.d/go_expvar.conf`](go_expvar.conf) by default
+(to edit it on your system, run `/etc/netdata/edit-config python.d/go_expvar.conf`).
+The module configuration consists of jobs, where each job can be used to monitor a separate Go application.
+Let's see a sample job configuration:
+
+```
+# /etc/netdata/python.d/go_expvar.conf
+
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts: {}
+```
+
+Let's go over each of the defined options:
+
+ name: 'app1'
+
+This is the job name that will appear at the netdata dashboard.
+If not defined, the job_name (top level key) will be used.
+
+ url: 'http://127.0.0.1:8080/debug/vars'
+
+This is the URL of the expvar endpoint. As the expvar handler can be installed
+in a custom path, the whole URL has to be specified. This value is mandatory.
+
+ collect_memstats: true
+
+Whether to enable collecting stats about Go runtime's memory. You can find more
+information about the exposed values at the [runtime package docs](https://golang.org/pkg/runtime/#MemStats).
+
+ extra_charts: {}
+
+Enables the user to specify custom expvars to monitor and chart.
+Will be explained in more detail below.
+
+**Note: if `collect_memstats` is disabled and no `extra_charts` are defined, the plugin will
+disable itself, as there will be no data to collect!**
+
+Apart from these options, each job supports options inherited from netdata's `python.d.plugin`
+and its base `UrlService` class. These are:
+
+ update_every: 1 # the job's data collection frequency
+ priority: 60000 # the job's order on the dashboard
+ user: admin # use when the expvar endpoint is protected by HTTP Basic Auth
+ password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth
+
+### Monitoring custom vars with go_expvar
+
+Now, memory stats might be useful, but what if you want netdata to monitor some custom values
+that your Go application exposes? The `go_expvar` module can do that as well with the use of
+the `extra_charts` configuration variable.
+
+The `extra_charts` variable is a YaML list of netdata chart definitions.
+Each chart definition has the following keys:
+
+ id: netdata chart ID
+ options: a key-value mapping of chart options
+ lines: a list of line definitions
+
+**Note: please do not use dots in the chart or line ID field.
+See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**
+
+Please see these two links to the official netdata documentation for more information about the values:
+
+- [External plugins - charts](../../plugins.d/#chart)
+- [Chart variables](../#global-variables-order-and-chart)
+
+**Line definitions**
+
+Each chart can define multiple lines (dimensions).
+A line definition is a key-value mapping of line options.
+Each line can have the following options:
+
+ # mandatory
+ expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
+ expvar_type: value type; supported are "float" or "int"
+ id: the id of this line/dimension in netdata
+
+ # optional - netdata defaults are used if these options are not defined
+ name: ''
+ algorithm: absolute
+ multiplier: 1
+ divisor: 100 if expvar_type == float, 1 if expvar_type == int
+ hidden: False
+
+Please see the following link for more information about the options and their default values:
+[External plugins - dimensions](../../plugins.d/#dimension)
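+
+A note on the float divisor default: netdata stores dimension values as integers, so the module multiplies float expvars by 100 before handing them over, and the default divisor of 100 restores two decimal places on the dashboard. A quick sketch of that round trip:
+
+```python
+value = 1512.9839999999983           # raw float expvar, e.g. counters.cnt2
+submitted = int(float(value) * 100)  # 151298 -- netdata stores integers
+print(submitted / 100.0)             # 1512.98, as shown with the default divisor of 100
+```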
+
+Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;
+all dicts in the resulting JSON document are then flattened to one level.
+Expvar names are joined together with '.' when flattening.
+
+Example:
+```
+{
+ "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
+ "runtime.goroutines": 5
+}
+```
+
+In the above case, the exported variables will be available under `runtime.goroutines`,
+`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
+the first defined key wins and all subsequent keys with the same name are ignored.
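+
+For reference, a minimal sketch of the flattening step (the `flatten` helper below is the one from `go_expvar.chart.py`):
+
+```python
+def flatten(d, top='', sep='.'):
+    # recursively flatten nested dicts, joining keys with '.'
+    items = []
+    for key, val in d.items():
+        nkey = top + sep + key if top else key
+        if isinstance(val, dict):
+            items.extend(flatten(val, nkey, sep=sep).items())
+        else:
+            items.append((nkey, val))
+    return dict(items)
+
+print(flatten({'counters': {'cnt1': 1042, 'cnt2': 1512.98}, 'runtime.goroutines': 5}))
+# {'counters.cnt1': 1042, 'counters.cnt2': 1512.98, 'runtime.goroutines': 5}
+```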
+
+**Configuration example**
+
+The configuration below matches the second Go application described above.
+Netdata will monitor and chart memory stats for the application, as well as a custom chart of
+running goroutines and two dummy counters.
+
+```
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts:
+ - id: "runtime_goroutines"
+ options:
+ name: num_goroutines
+ title: "runtime: number of goroutines"
+ units: goroutines
+ family: runtime
+ context: expvar.runtime.goroutines
+ chart_type: line
+ lines:
+ - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+ - id: "foo_counters"
+ options:
+ name: counters
+ title: "some random counters"
+ units: awesomeness
+ family: counters
+ context: expvar.foo.counters
+ chart_type: line
+ lines:
+ - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+ - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+```
+
+**Netdata charts example**
+
+The images below show how the final charts in netdata look.
+
+![Memory stats charts](https://cloud.githubusercontent.com/assets/15180106/26762052/62b4af58-493b-11e7-9e69-146705acfc2c.png)
+
+![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png)
+
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fgo_expvar%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
new file mode 100644
index 0000000..e82a877
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+# Description: go_expvar netdata python.d module
+# Author: Jan Kral (kralewitz)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import division
+import json
+
+from collections import namedtuple
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+MEMSTATS_ORDER = [
+ 'memstats_heap',
+ 'memstats_stack',
+ 'memstats_mspan',
+ 'memstats_mcache',
+ 'memstats_sys',
+ 'memstats_live_objects',
+ 'memstats_gc_pauses',
+]
+
+MEMSTATS_CHARTS = {
+ 'memstats_heap': {
+ 'options': ['heap', 'memory: size of heap memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.heap', 'line'],
+ 'lines': [
+ ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
+ ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_stack': {
+ 'options': ['stack', 'memory: size of stack memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.stack', 'line'],
+ 'lines': [
+ ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_mspan': {
+ 'options': ['mspan', 'memory: size of mspan memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.mspan', 'line'],
+ 'lines': [
+ ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_mcache': {
+ 'options': ['mcache', 'memory: size of mcache memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.mcache', 'line'],
+ 'lines': [
+ ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_live_objects': {
+ 'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
+ 'expvar.memstats.live_objects', 'line'],
+ 'lines': [
+ ['memstats_live_objects', 'live']
+ ]
+ },
+ 'memstats_sys': {
+ 'options': ['sys', 'memory: size of reserved virtual address space', 'KiB', 'memstats',
+ 'expvar.memstats.sys', 'line'],
+ 'lines': [
+ ['memstats_sys', 'sys', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_gc_pauses': {
+ 'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
+ 'expvar.memstats.gc_pauses', 'line'],
+ 'lines': [
+ ['memstats_gc_pauses', 'avg']
+ ]
+ }
+}
+
+EXPVAR = namedtuple(
+ "EXPVAR",
+ [
+ "key",
+ "type",
+ "id",
+ ]
+)
+
+
+def flatten(d, top='', sep='.'):
+ items = []
+ for key, val in d.items():
+ nkey = top + sep + key if top else key
+ if isinstance(val, dict):
+ items.extend(flatten(val, nkey, sep=sep).items())
+ else:
+ items.append((nkey, val))
+ return dict(items)
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ # if memstats collection is enabled, add the charts and their order
+ if self.configuration.get('collect_memstats'):
+ self.definitions = dict(MEMSTATS_CHARTS)
+ self.order = list(MEMSTATS_ORDER)
+ else:
+ self.definitions = dict()
+ self.order = list()
+
+ # if extra charts are defined, parse their config
+ extra_charts = self.configuration.get('extra_charts')
+ if extra_charts:
+ self._parse_extra_charts_config(extra_charts)
+
+ def check(self):
+ """
+ Check if the module can collect data:
+ 1) At least one JOB configuration has to be specified
+ 2) The JOB configuration needs to define the URL and either collect_memstats must be enabled or at least one
+ extra_chart must be defined.
+
+ The configuration and URL check is provided by the UrlService class.
+ """
+
+ if not (self.configuration.get('extra_charts') or self.configuration.get('collect_memstats')):
+ self.error('Memstats collection is disabled and no extra_charts are defined, disabling module.')
+ return False
+
+ return UrlService.check(self)
+
+ def _parse_extra_charts_config(self, extra_charts_config):
+
+ # a place to store the expvar keys and their types
+ self.expvars = list()
+
+ for chart in extra_charts_config:
+
+ chart_dict = dict()
+ chart_id = chart.get('id')
+ chart_lines = chart.get('lines')
+ chart_opts = chart.get('options', dict())
+
+ if not all([chart_id, chart_lines]):
+ self.info('Chart {0} has no ID or no lines defined, skipping'.format(chart))
+ continue
+
+ chart_dict['options'] = [
+ chart_opts.get('name', ''),
+ chart_opts.get('title', ''),
+ chart_opts.get('units', ''),
+ chart_opts.get('family', ''),
+ chart_opts.get('context', ''),
+ chart_opts.get('chart_type', 'line')
+ ]
+ chart_dict['lines'] = list()
+
+ # add the lines to the chart
+ for line in chart_lines:
+
+ ev_key = line.get('expvar_key')
+ ev_type = line.get('expvar_type')
+ line_id = line.get('id')
+
+ if not all([ev_key, ev_type, line_id]):
+ self.info('Line missing expvar_key, expvar_type or id, skipping: {0}'.format(line))
+ continue
+
+ if ev_type not in ['int', 'float']:
+ self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
+ continue
+
+ # self.expvars[ev_key] = (ev_type, line_id)
+ self.expvars.append(EXPVAR(ev_key, ev_type, line_id))
+
+ chart_dict['lines'].append(
+ [
+ line.get('id', ''),
+ line.get('name', ''),
+ line.get('algorithm', ''),
+ line.get('multiplier', 1),
+ line.get('divisor', 100 if ev_type == 'float' else 1),
+ line.get('hidden', False)
+ ]
+ )
+
+ self.order.append(chart_id)
+ self.definitions[chart_id] = chart_dict
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = json.loads(raw_data)
+
+ expvars = dict()
+ if self.configuration.get('collect_memstats'):
+ expvars.update(self._parse_memstats(data))
+
+ if self.configuration.get('extra_charts'):
+ # the memstats part of the data has been already parsed, so we remove it before flattening and checking
+ # the rest of the data, thus avoiding needless iterating over the multiply nested memstats dict.
+ del (data['memstats'])
+ flattened = flatten(data)
+
+ for ev in self.expvars:
+ v = flattened.get(ev.key)
+
+ if v is None:
+ continue
+
+ try:
+ if ev.type == 'int':
+ expvars[ev.id] = int(v)
+ elif ev.type == 'float':
+ expvars[ev.id] = float(v) * 100
+ except ValueError:
+ self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(ev.key, ev.type))
+ return None
+
+ return expvars
+
+ @staticmethod
+ def _parse_memstats(data):
+
+ memstats = data['memstats']
+
+ # calculate the number of live objects in memory
+ live_objs = int(memstats['Mallocs']) - int(memstats['Frees'])
+
+ # calculate GC pause times average
+ # the Go runtime keeps the last 256 GC pause durations in a circular buffer,
+ # so we need to filter out the 0 values before the buffer is filled
+ gc_pauses = memstats['PauseNs']
+ try:
+ gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
+ # no GC cycles have occurred yet
+ except ZeroDivisionError:
+ gc_pause_avg = 0
+
+ return {
+ 'memstats_heap_alloc': memstats['HeapAlloc'],
+ 'memstats_heap_inuse': memstats['HeapInuse'],
+ 'memstats_stack_inuse': memstats['StackInuse'],
+ 'memstats_mspan_inuse': memstats['MSpanInuse'],
+ 'memstats_mcache_inuse': memstats['MCacheInuse'],
+ 'memstats_sys': memstats['Sys'],
+ 'memstats_live_objects': live_objs,
+ 'memstats_gc_pauses': gc_pause_avg,
+ }
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
new file mode 100644
index 0000000..4b821cd
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf
@@ -0,0 +1,108 @@
+# netdata python.d.plugin configuration for go_expvar
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint
+#
+# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include
+# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# collect_memstats: true # enables charts for Go runtime's memory statistics
+# extra_charts: {} # defines extra data/charts to monitor, please see the example below
+#
+# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Please visit the module wiki page for more information on how to use the extra_charts variable:
+#
+# https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin/go_expvar
+#
+# Configuration example
+# ---------------------
+
+#app1:
+# name : 'app1'
+# url : 'http://127.0.0.1:8080/debug/vars'
+# collect_memstats: true
+# extra_charts:
+# - id: "runtime_goroutines"
+# options:
+# name: num_goroutines
+# title: "runtime: number of goroutines"
+# units: goroutines
+# family: runtime
+# context: expvar.runtime.goroutines
+# chart_type: line
+# lines:
+# - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+# - id: "foo_counters"
+# options:
+# name: counters
+# title: "some random counters"
+# units: awesomeness
+# family: counters
+# context: expvar.foo.counters
+# chart_type: line
+# lines:
+# - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+# - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+
diff --git a/collectors/python.d.plugin/haproxy/Makefile.inc b/collectors/python.d.plugin/haproxy/Makefile.inc
new file mode 100644
index 0000000..ad24dea
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += haproxy/haproxy.chart.py
+dist_pythonconfig_DATA += haproxy/haproxy.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += haproxy/README.md haproxy/Makefile.inc
+
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
new file mode 100644
index 0000000..4bd80a2
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -0,0 +1,51 @@
+# haproxy
+
+This module monitors frontend and backend metrics such as bytes in, bytes out, current sessions and current sessions in queue,
+as well as health metrics such as backend server status (server check should be used).
+
+The plugin can obtain data from a URL **OR** a UNIX socket.
+
+**Requirement:**
+The socket MUST be readable AND writable by the netdata user.
+
+It produces:
+
+1. **Frontend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+2. **Backend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+3. **Health** chart
+ * number of failed servers for every backend (in DOWN state)
+
+
+### configuration
+
+Sample:
+
+```yaml
+via_url:
+ user : 'username' # ONLY IF stats auth is used
+ pass : 'password' # ONLY IF stats auth is used
+ url : 'http://ip.address:port/url;csv;norefresh'
+```
+
+OR
+
+```yaml
+via_socket:
+ socket : 'path/to/haproxy/sock'
+```
+
+If no configuration is given, the module will fail to run.
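+
+For reference, haproxy reports statistics as CSV (from the stats URL, or in reply to `show stat` on the socket); the module zips every data row with the header row to build one dict per proxy. A minimal sketch, with made-up proxy names:
+
+```python
+raw = '\n'.join([
+    '# pxname,svname,scur,status',  # header row of haproxy's CSV stats
+    'app,FRONTEND,3,OPEN',
+    'app,BACKEND,2,UP',
+])
+
+header = raw.splitlines()[0].split(',')
+rows = [dict(zip(header, line.split(','))) for line in raw.splitlines()[1:]]
+print(rows[1]['svname'], rows[1]['scur'])  # BACKEND 2
+```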
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhaproxy%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
new file mode 100644
index 0000000..d97d28d
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -0,0 +1,363 @@
+# -*- coding: utf-8 -*-
+# Description: haproxy netdata python.d module
+# Author: l2isbad, ktarasz
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import defaultdict
+from re import compile as re_compile
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.FrameworkServices.UrlService import UrlService
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'fbin',
+ 'fbout',
+ 'fscur',
+ 'fqcur',
+ 'fhrsp_1xx',
+ 'fhrsp_2xx',
+ 'fhrsp_3xx',
+ 'fhrsp_4xx',
+ 'fhrsp_5xx',
+ 'fhrsp_other',
+ 'fhrsp_total',
+ 'bbin',
+ 'bbout',
+ 'bscur',
+ 'bqcur',
+ 'bhrsp_1xx',
+ 'bhrsp_2xx',
+ 'bhrsp_3xx',
+ 'bhrsp_4xx',
+ 'bhrsp_5xx',
+ 'bhrsp_other',
+ 'bhrsp_total',
+ 'bqtime',
+ 'bttime',
+ 'brtime',
+ 'bctime',
+ 'health_sup',
+ 'health_sdown',
+ 'health_bdown',
+ 'health_idle'
+]
+
+CHARTS = {
+ 'fbin': {
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'],
+ 'lines': []
+ },
+ 'fbout': {
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'],
+ 'lines': []
+ },
+ 'fscur': {
+ 'options': [None, 'Sessions Active', 'sessions', 'frontend', 'haproxy_f.scur', 'line'],
+ 'lines': []
+ },
+ 'fqcur': {
+ 'options': [None, 'Sessions In Queue', 'sessions', 'frontend', 'haproxy_f.qcur', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_1xx': {
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_2xx': {
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_3xx': {
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_4xx': {
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_5xx': {
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_other': {
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'frontend',
+ 'haproxy_f.hrsp_other', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_total': {
+ 'options': [None, 'HTTP responses', 'responses', 'frontend', 'haproxy_f.hrsp_total', 'line'],
+ 'lines': []
+ },
+ 'bbin': {
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'],
+ 'lines': []
+ },
+ 'bbout': {
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'],
+ 'lines': []
+ },
+ 'bscur': {
+ 'options': [None, 'Sessions Active', 'sessions', 'backend', 'haproxy_b.scur', 'line'],
+ 'lines': []
+ },
+ 'bqcur': {
+ 'options': [None, 'Sessions In Queue', 'sessions', 'backend', 'haproxy_b.qcur', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_1xx': {
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_1xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_2xx': {
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_2xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_3xx': {
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_3xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_4xx': {
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_4xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_5xx': {
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_5xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_other': {
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'backend',
+ 'haproxy_b.hrsp_other', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_total': {
+ 'options': [None, 'HTTP responses (total)', 'responses/s', 'backend', 'haproxy_b.hrsp_total', 'line'],
+ 'lines': []
+ },
+ 'bqtime': {
+ 'options': [None, 'The average queue time over the last 1024 requests', 'milliseconds', 'backend',
+ 'haproxy_b.qtime', 'line'],
+ 'lines': []
+ },
+ 'bctime': {
+ 'options': [None, 'The average connect time over the last 1024 requests', 'milliseconds', 'backend',
+ 'haproxy_b.ctime', 'line'],
+ 'lines': []
+ },
+ 'brtime': {
+ 'options': [None, 'The average response time over the last 1024 requests', 'milliseconds', 'backend',
+ 'haproxy_b.rtime', 'line'],
+ 'lines': []
+ },
+ 'bttime': {
+ 'options': [None, 'The average total session time over the last 1024 requests', 'milliseconds', 'backend',
+ 'haproxy_b.ttime', 'line'],
+ 'lines': []
+ },
+ 'health_sdown': {
+ 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'],
+ 'lines': []
+ },
+ 'health_sup': {
+ 'options': [None, 'Backend Servers In UP State', 'healthy servers', 'health', 'haproxy_hs.up', 'line'],
+ 'lines': []
+ },
+ 'health_bdown': {
+ 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
+ 'lines': []
+ },
+ 'health_idle': {
+ 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'],
+ 'lines': [
+ ['idle', None, 'absolute']
+ ]
+ }
+}
+
+
+METRICS = {
+ 'bin': {'algorithm': 'incremental', 'divisor': 1024},
+ 'bout': {'algorithm': 'incremental', 'divisor': 1024},
+ 'scur': {'algorithm': 'absolute', 'divisor': 1},
+ 'qcur': {'algorithm': 'absolute', 'divisor': 1},
+ 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
+}
+
+
+BACKEND_METRICS = {
+ 'qtime': {'algorithm': 'absolute', 'divisor': 1},
+ 'ctime': {'algorithm': 'absolute', 'divisor': 1},
+ 'rtime': {'algorithm': 'absolute', 'divisor': 1},
+ 'ttime': {'algorithm': 'absolute', 'divisor': 1}
+}
+
+
+REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
+ socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+
+
+# TODO: the code is unreadable
+class Service(UrlService, SocketService):
+ def __init__(self, configuration=None, name=None):
+ if 'socket' in configuration:
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.poll = SocketService
+ self.options_ = dict(regex=REGEX['socket'],
+ stat='show stat\n'.encode(),
+ info='show info\n'.encode())
+ else:
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.poll = UrlService
+ self.options_ = dict(regex=REGEX['url'],
+ stat=self.url,
+ info=url_remove_params(self.url))
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def check(self):
+ if self.poll.check(self):
+ self.create_charts()
+ self.info('We are using %s.' % self.poll.__name__)
+ return True
+ return False
+
+ def _get_data(self):
+ to_netdata = dict()
+ self.request, self.url = self.options_['stat'], self.options_['stat']
+ stat_data = self._get_stat_data()
+ self.request, self.url = self.options_['info'], self.options_['info']
+ info_data = self._get_info_data(regex=self.options_['regex'])
+
+ to_netdata.update(stat_data)
+ to_netdata.update(info_data)
+ return to_netdata or None
+
+ def _get_stat_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+
+ if not raw_data:
+ return dict()
+
+ raw_data = raw_data.splitlines()
+ self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(',')))
+ for _ in range(1, len(raw_data))])
+ if not self.data:
+ return dict()
+
+ stat_data = dict()
+
+ for frontend in self.data['frontend']:
+ idx = frontend['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0
+
+ for backend in self.data['backend']:
+ name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_')
+ stat_data['hsup_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'UP')])
+ stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'DOWN')])
+ stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
+ for metric in BACKEND_METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ hrsp_total = 0
+ for metric in METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ if metric.startswith('hrsp_'):
+ hrsp_total += int(backend.get(metric) or 0)
+ stat_data['_'.join(['backend', 'hrsp_total', idx])] = hrsp_total
+ return stat_data
+
+ def _get_info_data(self, regex):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+ if not raw_data:
+ return dict()
+
+ match = regex.search(raw_data)
+ return match.groupdict() if match else dict()
+
+ @staticmethod
+ def _check_raw_data(data):
+ """
+ Check if all data has been gathered from socket
+ :param data: str
+ :return: boolean
+ """
+ return not bool(data)
+
+ def create_charts(self):
+ for front in self.data['frontend']:
+ name, idx = front['# pxname'], front['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]),
+ name, 'incremental', 1, 1])
+ for back in self.data['backend']:
+ name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]),
+ name, 'incremental', 1, 1])
+ for metric in BACKEND_METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, BACKEND_METRICS[metric]['algorithm'], 1,
+ BACKEND_METRICS[metric]['divisor']])
+ self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute'])
+ self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
+ self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
+
+
+def parse_data_(data):
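+ # Group the parsed CSV rows by their 'svname' column: BACKEND and FRONTEND
+ # rows (excluding the built-in 'stats' proxy) are collected separately, and
+ # every remaining row is treated as an individual server.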
+ def is_backend(backend):
+ return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats'
+
+ def is_frontend(frontend):
+ return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats'
+
+ def is_server(server):
+ return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND'))
+
+ if not data:
+ return None
+
+ result = defaultdict(list)
+ for elem in data:
+ if is_backend(elem):
+ result['backend'].append(elem)
+ continue
+ elif is_frontend(elem):
+ result['frontend'].append(elem)
+ continue
+ elif is_server(elem):
+ result['servers'].append(elem)
+
+ return result or None
+
+
+def server_status(server, backend_name, status='DOWN'):
+ return server.get('# pxname') == backend_name and server.get('status') == status
+
+
+def url_remove_params(url):
+ parsed = urlparse(url or str())
+ return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path)
diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
new file mode 100644
index 0000000..10a0df3
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/haproxy.conf
@@ -0,0 +1,83 @@
+# netdata python.d.plugin configuration for haproxy
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, haproxy also supports the following:
+#
+# IMPORTANT: socket MUST BE readable AND writable by netdata user
+#
+# socket: 'path/to/haproxy/sock'
+#
+# OR
+# url: 'http://<ip.address>:<port>/<url>;csv;norefresh'
+# [user: USERNAME] only if stats auth is used
+# [pass: PASSWORD] only if stats auth is used
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+#via_url:
+# user : 'admin'
+# pass : 'password'
+# url : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
+
+#via_socket:
+# socket: '/var/run/haproxy/admin.sock'
diff --git a/collectors/python.d.plugin/hddtemp/Makefile.inc b/collectors/python.d.plugin/hddtemp/Makefile.inc
new file mode 100644
index 0000000..22852b6
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += hddtemp/hddtemp.chart.py
+dist_pythonconfig_DATA += hddtemp/hddtemp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
+
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
new file mode 100644
index 0000000..d9f254d
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -0,0 +1,24 @@
+# hddtemp
+
+Module monitors disk temperatures from one or more hddtemp daemons.
+
+**Requirement:**
+Running `hddtemp` in daemonized mode, listening on a TCP port.
+
+It produces one chart, **Temperature**, with a dynamic number of dimensions (one per disk).
+
+### configuration
+
+Sample:
+
+```yaml
+update_every: 3
+host: "127.0.0.1"
+port: 7634
+```
+
+If no configuration is given, the module attempts to connect to the hddtemp daemon at `127.0.0.1:7634`.
+
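+Collection can optionally be limited to specific disks via the `devices`
+option documented in `hddtemp.conf`; a sketch for two hypothetical disks:
+
+```yaml
+devices:
+  - sda
+  - sdb
+```
+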
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhddtemp%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
new file mode 100644
index 0000000..810aaac
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Description: hddtemp netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+
+ORDER = [
+ 'temperatures',
+]
+
+CHARTS = {
+ 'temperatures': {
+ 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'],
+ 'lines': [
+ # lines are created dynamically in `check()` method
+ ]}}
+
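+# The daemon replies with one record per disk in the form
+# |/dev/NAME|MODEL|TEMPERATURE|UNIT| where TEMPERATURE may also be 'SLP' or 'UNK',
+# e.g. (assumed sample): |/dev/sda|SAMSUNG SSD 850|35|C|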
+RE = re.compile(r'\/dev\/([^|]+)\|([^|]+)\|([0-9]+|SLP|UNK)\|')
+
+
+class Disk:
+ def __init__(self, id_, name, temp):
+ self.id = id_.split('/')[-1]
+ self.name = name.replace(' ', '_')
+ self.temp = temp if temp.isdigit() else 0
+
+ def __repr__(self):
+ return self.id
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.do_only = self.configuration.get('devices')
+ self._keep_alive = False
+ self.request = ""
+ self.host = "127.0.0.1"
+ self.port = 7634
+
+ def get_disks(self):
+ r = self._get_raw_data()
+
+ if not r:
+ return None
+
+ m = RE.findall(r)
+
+ if not m:
+ self.error("received data doesn't have needed records")
+ return None
+
+ rv = [Disk(*d) for d in m]
+ self.debug('available disks: {0}'.format(rv))
+
+ if self.do_only:
+ return [v for v in rv if v.id in self.do_only]
+ return rv
+
+ def get_data(self):
+ """
+ Get data from TCP/IP socket
+ :return: dict
+ """
+
+ disks = self.get_disks()
+
+ if not disks:
+ return None
+
+ return dict((d.id, d.temp) for d in disks)
+
+ def check(self):
+ """
+ Parse configuration, check if hddtemp is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ self._parse_config()
+ disks = self.get_disks()
+
+ if not disks:
+ return False
+
+ for d in disks:
+ dim = [d.id]
+ self.definitions['temperatures']['lines'].append(dim)
+
+ return True
+
+ @staticmethod
+ def _check_raw_data(data):
+ return not bool(data)
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf
new file mode 100644
index 0000000..b2d7aef
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf
@@ -0,0 +1,95 @@
+# netdata python.d.plugin configuration for hddtemp
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, hddtemp also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+
+# By default this module will try to autodetect disks
+# (autodetection works only for disks whose names start with "sd").
+# However this can be overridden by setting the `devices` variable to
+# an array of desired disks. Example for two disks:
+#
+# devices:
+# - sda
+# - sdb
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 7634
+
+localipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+
+localipv6:
+ name: 'local'
+ host: '::1'
+ port: 7634
diff --git a/collectors/python.d.plugin/httpcheck/Makefile.inc b/collectors/python.d.plugin/httpcheck/Makefile.inc
new file mode 100644
index 0000000..4a5bd85
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += httpcheck/httpcheck.chart.py
+dist_pythonconfig_DATA += httpcheck/httpcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += httpcheck/README.md httpcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
new file mode 100644
index 0000000..4cd024d
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -0,0 +1,43 @@
+# httpcheck
+
+Module monitors a remote HTTP server for availability and response time.
+
+The following charts are drawn per job:
+
+1. **Response time** ms
+ * Time, at 0.1 ms resolution, in which the server responds.
+ If the connection failed, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Unexpected content: No Regex match found in the response
+ * Unexpected status code: Do we get 500 errors?
+ * Connection failed: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+### configuration
+
+Sample configuration with default values:
+
+```yaml
+server:
+ url: 'http://host:port/path' # required
+ status_accepted: # optional
+ - 200
+ timeout: 1 # optional, supports decimals (e.g. 0.2)
+ update_every: 3 # optional
+ regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html
+ redirect: yes # optional
+```
+
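+Any number of independent jobs can be defined. A sketch mirroring the samples
+shipped in `httpcheck.conf`:
+
+```yaml
+jira:
+  url: 'https://jira.localdomain/'
+
+cool_website:
+  url: 'http://cool.website:8080/home'
+  status_accepted:
+    - 200
+    - 204
+  regex: <title>My cool website!<\/title>
+  timeout: 2
+```
+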
+### notes
+
+ * The status chart is primarily intended for alarms, badges or for access via API.
+ * A system/service/firewall might block netdata's access if a portscan or
+ similar is detected.
+ * This plugin is meant for simple use cases. Currently, the accuracy of the
+ response time is low, so treat it as a reference only.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fhttpcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
new file mode 100644
index 0000000..fd51370
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+# Description: http check netdata python.d module
+# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import urllib3
+import re
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 3
+priority = 60000
+
+# Response
+HTTP_RESPONSE_TIME = 'time'
+HTTP_RESPONSE_LENGTH = 'length'
+
+# Status dimensions
+HTTP_SUCCESS = 'success'
+HTTP_BAD_CONTENT = 'bad_content'
+HTTP_BAD_STATUS = 'bad_status'
+HTTP_TIMEOUT = 'timeout'
+HTTP_NO_CONNECTION = 'no_connection'
+
+ORDER = [
+ 'response_time',
+ 'response_length',
+ 'status',
+]
+
+CHARTS = {
+ 'response_time': {
+ 'options': [None, 'HTTP response time', 'milliseconds', 'response', 'httpcheck.responsetime', 'line'],
+ 'lines': [
+ [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
+ ]
+ },
+ 'response_length': {
+ 'options': [None, 'HTTP response body length', 'characters', 'response', 'httpcheck.responselength', 'line'],
+ 'lines': [
+ [HTTP_RESPONSE_LENGTH, 'length', 'absolute']
+ ]
+ },
+ 'status': {
+ 'options': [None, 'HTTP status', 'boolean', 'status', 'httpcheck.status', 'line'],
+ 'lines': [
+ [HTTP_SUCCESS, 'success', 'absolute'],
+ [HTTP_BAD_CONTENT, 'bad content', 'absolute'],
+ [HTTP_BAD_STATUS, 'bad status', 'absolute'],
+ [HTTP_TIMEOUT, 'timeout', 'absolute'],
+ [HTTP_NO_CONNECTION, 'no connection', 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ pattern = self.configuration.get('regex')
+ self.regex = re.compile(pattern) if pattern else None
+ self.status_codes_accepted = self.configuration.get('status_accepted', [200])
+ self.follow_redirect = self.configuration.get('redirect', True)
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = dict()
+ data[HTTP_SUCCESS] = 0
+ data[HTTP_BAD_CONTENT] = 0
+ data[HTTP_BAD_STATUS] = 0
+ data[HTTP_TIMEOUT] = 0
+ data[HTTP_NO_CONNECTION] = 0
+ url = self.url
+ try:
+ start = time()
+ status, content = self._get_raw_data_with_status(retries=1 if self.follow_redirect else False,
+ redirect=self.follow_redirect)
+ diff = time() - start
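+ # diff is in seconds; the value is stored in tenths of a millisecond, and the
+ # chart definition (multiplier 100, divisor 1000) renders it as milliseconds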
+ data[HTTP_RESPONSE_TIME] = max(round(diff * 10000), 0)
+ self.debug('Url: {url}. Host responded with status code {code} in {diff} s'.format(
+ url=url, code=status, diff=diff
+ ))
+ self.process_response(content, data, status)
+
+ except urllib3.exceptions.NewConnectionError as error:
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
+ data[HTTP_NO_CONNECTION] = 1
+
+ except (urllib3.exceptions.TimeoutError, urllib3.exceptions.PoolError) as error:
+ self.debug('Connection timed out: {url}. Error: {error}'.format(url=url, error=error))
+ data[HTTP_TIMEOUT] = 1
+
+ except urllib3.exceptions.HTTPError as error:
+ self.debug('Connection failed: {url}. Error: {error}'.format(url=url, error=error))
+ data[HTTP_NO_CONNECTION] = 1
+
+ except (TypeError, AttributeError) as error:
+ self.error('Url: {url}. Error: {error}'.format(url=url, error=error))
+ return None
+
+ return data
+
+ def process_response(self, content, data, status):
+ data[HTTP_RESPONSE_LENGTH] = len(content)
+ self.debug('Content: \n\n{content}\n'.format(content=content))
+ if status in self.status_codes_accepted:
+ if self.regex and self.regex.search(content) is None:
+ self.debug('No match for regex "{regex}" found'.format(regex=self.regex.pattern))
+ data[HTTP_BAD_CONTENT] = 1
+ else:
+ data[HTTP_SUCCESS] = 1
+ else:
+ data[HTTP_BAD_STATUS] = 1
diff --git a/collectors/python.d.plugin/httpcheck/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
new file mode 100644
index 0000000..1e1dd02
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf
@@ -0,0 +1,104 @@
+# netdata python.d.plugin configuration for httpcheck
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the httpcheck default is used, which is 3 seconds.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# -- For this plugin, cleanup MUST be disabled, otherwise we lose response
+# time charts
+chart_cleanup: 0
+
+# Autodetection and retries do not work for this plugin
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# -------------------------------
+# ATTENTION: Any valid configuration will be accepted, even if initial connection fails!
+# -------------------------------
+#
+# There is intentionally no default config, e.g. for 'localhost'
+
+# job_name:
+# name: myname # [optional] the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 3 # [optional] the JOB's data collection frequency
+# priority: 60000 # [optional] the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# timeout: 1 # [optional] the timeout when connecting, supports decimals (e.g. 0.5)
+# url: 'http[s]://host-ip-or-dns[:port][path]'
+# # [required] the remote host url to connect to. If [:port] is missing, it defaults to 80
+# # for HTTP and 443 for HTTPS. [path] is optional too, defaults to /
+# method: GET # [optional] the HTTP request method (POST, PUT, DELETE, HEAD etc.)
+# redirect: yes # [optional] If the remote host returns 3xx status codes, the redirection url will be
+# # followed (default).
+# status_accepted: # [optional] By default, 200 is accepted. Anything else will result in 'bad status' in the
+# # status chart, however: The response time will still be > 0, since the
+# # host responded with something.
+# # If redirect is enabled, the accepted status will be checked against the redirected page.
+# - 200 # Multiple status codes are possible. If you specify 'status_accepted', you would still
+# # need to add '200'. E.g. 'status_accepted: [301]' will trigger an error in 'bad status'
+# # if code is 200. Do specify numerical entries such as 200, not 'OK'.
+# regex: None # [optional] If the status code is accepted, the content of the response will be searched for this
+# # regex (if defined). Be aware that you may need to escape the regex string. If redirect is enabled,
+# # the regex will be matched to the redirected page, not the initial 3xx response.
+
+# Simple example:
+#
+# jira:
+# url: 'https://jira.localdomain/'
+
+
+# Complex example:
+#
+# cool_website:
+# url: 'http://cool.website:8080/home'
+# status_accepted:
+# - 200
+# - 204
+# regex: <title>My cool website!<\/title>
+# timeout: 2
+
+# This plugin is intended for simple cases. Currently, the accuracy of the response time is low; treat it as a reference only.
+
diff --git a/collectors/python.d.plugin/icecast/Makefile.inc b/collectors/python.d.plugin/icecast/Makefile.inc
new file mode 100644
index 0000000..cb7c6fa
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += icecast/icecast.chart.py
+dist_pythonconfig_DATA += icecast/icecast.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += icecast/README.md icecast/Makefile.inc
+
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
new file mode 100644
index 0000000..068da6a
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -0,0 +1,28 @@
+# icecast
+
+This module monitors the number of listeners for active sources.
+
+**Requirements:**
+ * icecast version >= 2.4.0
+
+It produces the following charts:
+
+1. **Listeners** in listeners
+ * one dimension per source
+
+### configuration
+
+Needs only the `url` of the server's `/status-json.xsl`.
+
+Here is an example for a remote server:
+
+```yaml
+remote:
+ url : 'http://1.2.3.4:8443/status-json.xsl'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8443/status-json.xsl`.
+
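+If the status page is password protected, `user` and `pass` may be supplied,
+as documented in `icecast.conf`:
+
+```yaml
+remote:
+  url : 'http://1.2.3.4:8443/status-json.xsl'
+  user: 'username'
+  pass: 'password'
+```
+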
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ficecast%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
new file mode 100644
index 0000000..40eaf89
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Description: icecast netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'listeners',
+]
+
+CHARTS = {
+ 'listeners': {
+ 'options': [None, 'Number Of Listeners', 'listeners', 'listeners', 'icecast.listeners', 'line'],
+ 'lines': [
+ ]
+ }
+}
+
+
+class Source:
+ def __init__(self, idx, data):
+ self.name = 'source_{0}'.format(idx)
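+ # a source counts as active only when it reports both 'stream_start' and 'server_name'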
+ self.is_active = data.get('stream_start') and data.get('server_name')
+ self.listeners = data['listeners']
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url')
+ self._manager = self._build_manager()
+
+ def check(self):
+ """
+ Add active sources to the "listeners" chart
+ :return: bool
+ """
+ sources = self.get_sources()
+ if not sources:
+ return False
+
+ active_sources = 0
+ for idx, raw_source in enumerate(sources):
+ if Source(idx, raw_source).is_active:
+ active_sources += 1
+ dim_id = 'source_{0}'.format(idx)
+ dim = 'source {0}'.format(idx)
+ self.definitions['listeners']['lines'].append([dim_id, dim])
+
+ return bool(active_sources)
+
+ def _get_data(self):
+ """
+ Get number of listeners for every source
+ :return: dict
+ """
+ sources = self.get_sources()
+ if not sources:
+ return None
+
+ data = dict()
+
+ for idx, raw_source in enumerate(sources):
+ source = Source(idx, raw_source)
+ data[source.name] = source.listeners
+
+ return data
+
+ def get_sources(self):
+ """
+ Format data received from http request and return list of sources
+ :return: list
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ data = json.loads(raw_data)
+ except ValueError as error:
+ self.error('JSON decode error:', error)
+ return None
+
+ sources = data['icestats'].get('source')
+ if not sources:
+ return None
+
+ return sources if isinstance(sources, list) else [sources]
diff --git a/collectors/python.d.plugin/icecast/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf
new file mode 100644
index 0000000..a33074a
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/icecast.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for icecast
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, icecast also supports the following:
+#
+# url: 'URL' # the URL to fetch icecast's stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8443/status-json.xsl'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8443/status-json.xsl'
\ No newline at end of file
diff --git a/collectors/python.d.plugin/ipfs/Makefile.inc b/collectors/python.d.plugin/ipfs/Makefile.inc
new file mode 100644
index 0000000..68458cb
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ipfs/ipfs.chart.py
+dist_pythonconfig_DATA += ipfs/ipfs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ipfs/README.md ipfs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
new file mode 100644
index 0000000..a839203
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -0,0 +1,27 @@
+# ipfs
+
+Module monitors basic [IPFS](https://ipfs.io) information.
+
+1. **Bandwidth** in kbits/s
+ * in
+ * out
+
+2. **Peers**
+ * peers
+
+### configuration
+
+Only the URL of the IPFS API server is needed.
+
+Sample:
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://localhost:5001'
+```
+
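+Polling of pinned objects (`pinapi`) is disabled by default because of an
+upstream go-ipfs bug causing high CPU usage (see `ipfs.conf`). A sketch
+enabling it:
+
+```yaml
+localhost:
+  name   : 'local'
+  url    : 'http://localhost:5001'
+  pinapi : yes
+```
+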
+---
+
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fipfs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
new file mode 100644
index 0000000..8c89b4b
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Description: IPFS netdata python.d module
+# Authors: davidak
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'repo_size',
+ 'repo_objects',
+]
+
+CHARTS = {
+ 'bandwidth': {
+ 'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+ 'lines': [
+ ['in', None, 'absolute', 8, 1000],
+ ['out', None, 'absolute', -8, 1000]
+ ]
+ },
+ 'peers': {
+ 'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
+ 'lines': [
+ ['peers', None, 'absolute']
+ ]
+ },
+ 'repo_size': {
+ 'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
+ 'lines': [
+ ['avail', None, 'absolute', 1, 1 << 30],
+ ['size', None, 'absolute', 1, 1 << 30],
+ ]
+ },
+ 'repo_objects': {
+ 'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
+ 'lines': [
+ ['objects', None, 'absolute', 1, 1],
+ ['pinned', None, 'absolute', 1, 1],
+ ['recursive_pins', None, 'absolute', 1, 1]
+ ]
+ }
+}
+
+SI_zeroes = {
+ 'k': 3,
+ 'm': 6,
+ 'g': 9,
+ 't': 12,
+ 'p': 15,
+ 'e': 18,
+ 'z': 21,
+ 'y': 24
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.baseurl = self.configuration.get('url', 'http://localhost:5001')
+ self.do_pinapi = self.configuration.get('pinapi')
+ self.__storage_max = None
+
+ def _get_json(self, sub_url):
+ """
+ :return: json decoding of the specified url
+ """
+ self.url = self.baseurl + sub_url
+ try:
+ return json.loads(self._get_raw_data())
+ except (TypeError, ValueError):
+ return dict()
+
+ @staticmethod
+ def _recursive_pins(keys):
+ # json.loads yields text strings, so compare against str; b'recursive' never matches on Python 3
+ return sum(1 for k in keys if keys[k]['Type'] == 'recursive')
+
+ @staticmethod
+ def _dehumanize(store_max):
+ # convert from '10Gb' to 10000000000
+ if not isinstance(store_max, int):
+ store_max = store_max.lower()
+ if store_max.endswith('b'):
+ val, units = store_max[:-2], store_max[-2]
+ if units in SI_zeroes:
+ val += '0'*SI_zeroes[units]
+ store_max = val
+ try:
+ store_max = int(store_max)
+ except (TypeError, ValueError):
+ store_max = None
+ return store_max
+
+ def _storagemax(self, store_cfg):
+ if self.__storage_max is None:
+ self.__storage_max = self._dehumanize(store_cfg)
+ return self.__storage_max
+
+ def _get_data(self):
+ """
+ Get data from API
+ :return: dict
+ """
+ # suburl : List of (result-key, original-key, transform-func)
+ cfg = {
+ '/api/v0/stats/bw':
+ [('in', 'RateIn', int), ('out', 'RateOut', int)],
+ '/api/v0/swarm/peers':
+ [('peers', 'Peers', len)],
+ '/api/v0/stats/repo':
+ [('size', 'RepoSize', int), ('objects', 'NumObjects', int), ('avail', 'StorageMax', self._storagemax)],
+ }
+ if self.do_pinapi:
+ cfg.update({
+ '/api/v0/pin/ls':
+ [('pinned', 'Keys', len), ('recursive_pins', 'Keys', self._recursive_pins)]
+ })
+ r = dict()
+ for suburl in cfg:
+ in_json = self._get_json(suburl)
+ for new_key, orig_key, xmute in cfg[suburl]:
+ try:
+ r[new_key] = xmute(in_json[orig_key])
+ except Exception as error:
+ self.debug(error)
+ return r or None
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
new file mode 100644
index 0000000..c7e1864
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for ipfs
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, ipfs also supports the following:
+#
+# url: 'URL' # URL to the IPFS API
+# pinapi: no # Set status of IPFS pinned object polling
+# # Currently defaults to disabled due to IPFS Bug
+# # https://github.com/ipfs/go-ipfs/issues/3874
+# # resulting in very high CPU Usage
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:5001'
+ pinapi : no
diff --git a/collectors/python.d.plugin/isc_dhcpd/Makefile.inc b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
new file mode 100644
index 0000000..44343fc
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += isc_dhcpd/isc_dhcpd.chart.py
+dist_pythonconfig_DATA += isc_dhcpd/isc_dhcpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += isc_dhcpd/README.md isc_dhcpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
new file mode 100644
index 0000000..67547e2
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -0,0 +1,36 @@
+# isc_dhcpd
+
+Module monitors the leases database to show all active leases for the given pools.
+
+**Requirements:**
+ * dhcpd leases file MUST BE readable by netdata
+ * pools MUST BE in CIDR format
+
+It produces:
+
+1. **Pools utilization** Aggregate chart for all pools.
+ * utilization in percent
+
+2. **Total leases**
+ * leases (overall number of leases for all pools)
+
+3. **Active leases** for every pool
+ * leases (number of active leases in pool)
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+  leases_path : '/var/lib/dhcp/dhcpd.leases'
+  pools:
+    office : '192.168.3.0/24'
+    wifi   : '192.168.4.0/24'
+    guest  : '192.168.5.0/24'
+```
+
+Pool names become chart dimensions. With Python 2 you need to install
+`py2-ipaddress` for the plugin to work.
+The module will not work if no configuration is given.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fisc_dhcpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
new file mode 100644
index 0000000..bbe7a93
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# Description: isc dhcpd lease netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+import time
+
+
+try:
+ import ipaddress
+ HAVE_IP_ADDRESS = True
+except ImportError:
+ HAVE_IP_ADDRESS = False
+
+from collections import defaultdict
+from copy import deepcopy
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+ORDER = [
+ 'pools_utilization',
+ 'pools_active_leases',
+ 'leases_total',
+]
+
+CHARTS = {
+ 'pools_utilization': {
+ 'options': [None, 'Pools Utilization', 'percentage', 'utilization', 'isc_dhcpd.utilization', 'line'],
+ 'lines': []
+ },
+ 'pools_active_leases': {
+ 'options': [None, 'Active Leases Per Pool', 'leases', 'active leases', 'isc_dhcpd.active_leases', 'line'],
+ 'lines': []
+ },
+ 'leases_total': {
+ 'options': [None, 'All Active Leases', 'leases', 'active leases', 'isc_dhcpd.leases_total', 'line'],
+ 'lines': [
+ ['leases_total', 'leases', 'absolute']
+ ],
+ 'variables': [
+ ['leases_size']
+ ]
+ }
+}
+
+
+class DhcpdLeasesFile:
+ def __init__(self, path):
+ self.path = path
+ self.mod_time = 0
+ self.size = 0
+
+ def is_valid(self):
+ return os.path.isfile(self.path) and os.access(self.path, os.R_OK)
+
+ def is_changed(self):
+ mod_time = os.path.getmtime(self.path)
+ if mod_time != self.mod_time:
+ self.mod_time = mod_time
+ self.size = int(os.path.getsize(self.path) / 1024)
+ return True
+ return False
+
+ def get_data(self):
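+ # a lease block in dhcpd.leases looks roughly like (assumed sample):
+ #   lease 192.168.3.10 {
+ #     ends 3 2018/07/25 10:04:54;
+ #     binding state active;
+ #   }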
+ try:
+ with open(self.path) as leases:
+ result = defaultdict(dict)
+ for row in leases:
+ row = row.strip()
+ if row.startswith('lease'):
+ address = row[6:-2]
+ elif row.startswith('iaaddr'):
+ address = row[7:-2]
+ elif row.startswith('ends'):
+ result[address]['ends'] = row[5:-1]
+ elif row.startswith('binding state'):
+ result[address]['state'] = row[14:-1]
+ return dict((k, v) for k, v in result.items() if len(v) == 2)
+ except (OSError, IOError):
+ return None
+
+
+class Pool:
+ def __init__(self, name, network):
+ self.id = re.sub(r'[:/.-]+', '_', name)
+ self.name = name
+ self.network = ipaddress.ip_network(address=u'%s' % network)
+
+ def num_hosts(self):
+ return self.network.num_addresses - 2
+
+ def __contains__(self, item):
+ return item.address in self.network
+
+
+class Lease:
+ def __init__(self, address, ends, state):
+ self.address = ipaddress.ip_address(address=u'%s' % address)
+ self.ends = ends
+ self.state = state
+
+ def is_active(self, current_time):
+ # lease_end_time might be epoch
+ if self.ends.startswith('epoch'):
+ epoch = int(self.ends.split()[1].replace(';', ''))
+ return epoch - current_time > 0
+ # max. int for lease-time causes lease to expire in year 2038.
+ # dhcpd puts 'never' in the ends section of active lease
+ elif self.ends == 'never':
+ return True
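+ # otherwise 'ends' uses the default db-time-format, e.g. '3 2018/07/25 10:04:54'
+ # (weekday year/month/day hour:minute:second, stored in UTC by dhcpd)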
+ return time.mktime(time.strptime(self.ends, '%w %Y/%m/%d %H:%M:%S')) - current_time > 0
+
+ def is_valid(self):
+ return self.state == 'active'
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
+ self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
+ self.pools = list()
+ self.data = dict()
+
+ # Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
+ # TODO: update algorithm to parse correctly 'local' db-time-format
+
+ def check(self):
+ if not HAVE_IP_ADDRESS:
+ self.error("'python-ipaddress' package is needed")
+ return False
+
+ if not self.dhcpd_leases.is_valid():
+ self.error("Make sure '{path}' is exist and readable by netdata".format(path=self.dhcpd_leases.path))
+ return False
+
+ pools = self.configuration.get('pools')
+ if not pools:
+ self.error('Pools are not defined')
+ return False
+
+ for pool in pools:
+ try:
+ new_pool = Pool(name=pool, network=pools[pool])
+ except ValueError as error:
+ self.error("'{pool}' was removed, error: {error}".format(pool=pools[pool], error=error))
+ else:
+ self.pools.append(new_pool)
+
+ self.create_charts()
+ return bool(self.pools)
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ if not self.dhcpd_leases.is_changed():
+ return self.data
+
+ raw_leases = self.dhcpd_leases.get_data()
+ if not raw_leases:
+ self.data = dict()
+ return None
+
+ active_leases = list()
+ current_time = time.mktime(time.gmtime())
+
+ for address in raw_leases:
+ try:
+ new_lease = Lease(address, **raw_leases[address])
+ except ValueError:
+ continue
+ else:
+ if new_lease.is_active(current_time) and new_lease.is_valid():
+ active_leases.append(new_lease)
+
+ for pool in self.pools:
+ count = len([ip for ip in active_leases if ip in pool])
+ self.data[pool.id + '_active_leases'] = count
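+ # utilization is stored as hundredths of a percent; the dimension's
+ # divisor of 100 restores a percentage with two-decimal precision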
+ self.data[pool.id + '_utilization'] = float(count) / pool.num_hosts() * 10000
+
+ self.data['leases_size'] = self.dhcpd_leases.size
+ self.data['leases_total'] = len(active_leases)
+
+ return self.data
+
+ def create_charts(self):
+ for pool in self.pools:
+ dim = [
+ pool.id + '_utilization',
+ pool.name,
+ 'absolute',
+ 1,
+ 100,
+ ]
+ self.definitions['pools_utilization']['lines'].append(dim)
+
+ dim = [
+ pool.id + '_active_leases',
+ pool.name,
+ ]
+ self.definitions['pools_active_leases']['lines'].append(dim)
diff --git a/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
new file mode 100644
index 0000000..8dcb508
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
@@ -0,0 +1,79 @@
+# netdata python.d.plugin configuration for isc dhcpd leases
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, isc_dhcpd supports the following:
+#
+# leases_path: 'PATH' # the path to dhcpd.leases file
+# pools:
+# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+# wifi: '192.168.3.0/24' # name(dimension): pool in CIDR format
+# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
+#
+#-----------------------------------------------------------------------
+# IMPORTANT notes
+#
+# 1. Make sure leases file is readable by netdata.
+# 2. Current implementation works only with 'default' db-time-format
+# (weekday year/month/day hour:minute:second).
+# This is the default, so it will work in most cases.
+# 3. Pools MUST BE in CIDR format.
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/linux_power_supply/Makefile.inc b/collectors/python.d.plugin/linux_power_supply/Makefile.inc
new file mode 100644
index 0000000..1864ba5
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += linux_power_supply/linux_power_supply.chart.py
+dist_pythonconfig_DATA += linux_power_supply/linux_power_supply.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += linux_power_supply/README.md linux_power_supply/Makefile.inc
+
diff --git a/collectors/python.d.plugin/linux_power_supply/README.md b/collectors/python.d.plugin/linux_power_supply/README.md
new file mode 100644
index 0000000..f5b05d1
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/README.md
@@ -0,0 +1,74 @@
+# Linux power supply
+
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT
+
+---
+
+This module monitors various metrics reported by power supply drivers
+on Linux. This allows tracking and alerting on things like remaining
+battery capacity.
+
+Depending on the underlying driver, it may provide the following charts
+and metrics:
+
+1. Capacity: The power supply capacity expressed as a percentage.
+ * capacity\_now
+
+2. Charge: The charge for the power supply, expressed as microamp-hours (µAh).
+ * charge\_full\_design
+ * charge\_full
+ * charge\_now
+ * charge\_empty
+ * charge\_empty\_design
+
+3. Energy: The energy for the power supply, expressed as microwatt-hours (µWh).
+ * energy\_full\_design
+ * energy\_full
+ * energy\_now
+ * energy\_empty
+ * energy\_empty\_design
+
+4. Voltage: The voltage for the power supply, expressed as microvolts (µV).
+ * voltage\_max\_design
+ * voltage\_max
+ * voltage\_now
+ * voltage\_min
+ * voltage\_min\_design
+
+### configuration
+
+Sample:
+
+```yaml
+battery:
+ supply: 'BAT0'
+ charts: 'capacity charge energy voltage'
+```
+
+The `supply` key specifies the name of the power supply device to monitor.
+You can use `ls /sys/class/power_supply` to get a list of such devices
+on your system.
+
+The `charts` key is a space-separated list of which charts to try
+to display. It defaults to trying to display everything.
+
+### notes
+
+* Most drivers provide at least the first chart. Battery powered ACPI
+compliant systems (like most laptops) provide all but the third, but do
+not provide all of the metrics for each chart.
+
+* Charge, energy, and voltage are reported with a _very_ high precision
+by the power\_supply framework. Usually, this is far higher than the
+actual hardware supports reporting, so expect the values in these
+charts to jump instead of scaling smoothly.
+
+* If a `max` or `full` attribute is defined by the driver, but not a
+corresponding `min` or `empty` attribute, then netdata will still provide
+the corresponding `min` or `empty` dimension, which will then always read
+as zero. This way, alerts which match on these will still work.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flinux_power_supply%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py
new file mode 100644
index 0000000..71d834e
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Description: Linux power_supply netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+
+import os
+import platform
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# Everything except percentages is reported as µ units.
+PRECISION = 10 ** 6
+
+# A priority of 90000 places us next to the other PSU related stuff.
+PRIORITY = 90000
+
+# We add our charts dynamically when we probe for the device attributes,
+# so these are empty by default.
+ORDER = []
+
+CHARTS = {}
+
+
+def get_capacity_chart(syspath):
+ # Capacity is measured in percent. We track one value.
+ options = [None, 'Capacity', '%', 'power_supply', 'power_supply.capacity', 'line']
+ lines = list()
+ attr_now = 'capacity'
+ if get_sysfs_value(os.path.join(syspath, attr_now)) is not None:
+ lines.append([attr_now, attr_now, 'absolute', 1, 1])
+ return {'capacity': {'options': options, 'lines': lines}}, [attr_now]
+ else:
+ return None, None
+
+
+def get_generic_chart(syspath, name, unit, maxname, minname):
+ # Used to generate charts for energy, charge, and voltage.
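+ # For name='charge', maxname='full', minname='empty' this probes the sysfs
+ # attributes charge_full_design, charge_full, charge_now, charge_empty and
+ # charge_empty_design, building the chart's dimensions from those that exist.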
+ options = [None, name.title(), unit, 'power_supply', 'power_supply.{0}'.format(name), 'line']
+ lines = list()
+ attrlist = list()
+ attr_max_design = '{0}_{1}_design'.format(name, maxname)
+ attr_max = '{0}_{1}'.format(name, maxname)
+ attr_now = '{0}_now'.format(name)
+ attr_min = '{0}_{1}'.format(name, minname)
+ attr_min_design = '{0}_{1}_design'.format(name, minname)
+ if get_sysfs_value(os.path.join(syspath, attr_now)) is not None:
+ lines.append([attr_now, attr_now, 'absolute', 1, PRECISION])
+ attrlist.append(attr_now)
+ else:
+ return None, None
+ if get_sysfs_value(os.path.join(syspath, attr_max)) is not None:
+ lines.insert(0, [attr_max, attr_max, 'absolute', 1, PRECISION])
+ lines.append([attr_min, attr_min, 'absolute', 1, PRECISION])
+ attrlist.append(attr_max)
+ attrlist.append(attr_min)
+ elif get_sysfs_value(os.path.join(syspath, attr_min)) is not None:
+ lines.append([attr_min, attr_min, 'absolute', 1, PRECISION])
+ attrlist.append(attr_min)
+ if get_sysfs_value(os.path.join(syspath, attr_max_design)) is not None:
+ lines.insert(0, [attr_max_design, attr_max_design, 'absolute', 1, PRECISION])
+ lines.append([attr_min_design, attr_min_design, 'absolute', 1, PRECISION])
+ attrlist.append(attr_max_design)
+ attrlist.append(attr_min_design)
+ elif get_sysfs_value(os.path.join(syspath, attr_min_design)) is not None:
+ lines.append([attr_min_design, attr_min_design, 'absolute', 1, PRECISION])
+ attrlist.append(attr_min_design)
+ return {name: {'options': options, 'lines': lines}}, attrlist
+
+
+def get_charge_chart(syspath):
+ # Charge is measured in microamphours. We track up to five
+ # attributes.
+ return get_generic_chart(syspath, 'charge', 'µAh', 'full', 'empty')
+
+
+def get_energy_chart(syspath):
+ # Energy is measured in microwatthours. We track up to five
+ # attributes.
+ return get_generic_chart(syspath, 'energy', 'µWh', 'full', 'empty')
+
+
+def get_voltage_chart(syspath):
+ # Voltage is measured in microvolts. We track up to five attributes.
+ return get_generic_chart(syspath, 'voltage', 'µV', 'min', 'max')
+
+
+# This is a list of functions for generating charts. Used below to save
+# a bit of code (and to make it a bit easier to add new charts).
+GET_CHART = {
+ 'capacity': get_capacity_chart,
+ 'charge': get_charge_chart,
+ 'energy': get_energy_chart,
+ 'voltage': get_voltage_chart
+}
+
+
+# This opens the specified file and returns the value in it, or None if
+# the file is missing, unreadable, or doesn't contain an integer.
+def get_sysfs_value(filepath):
+ try:
+ with open(filepath, 'r') as datasource:
+ return int(datasource.read())
+ except (OSError, IOError, ValueError):
+ return None
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.definitions = dict()
+ self.order = list()
+ self.attrlist = list()
+ self.supply = self.configuration.get('supply', None)
+ if self.supply is not None:
+ self.syspath = '/sys/class/power_supply/{0}'.format(self.supply)
+ self.types = self.configuration.get('charts', 'capacity').split()
+
+ def check(self):
+ if platform.system() != 'Linux':
+ self.error('Only supported on Linux.')
+ return False
+ if self.supply is None:
+ self.error('No power supply specified for monitoring.')
+ return False
+ if not self.types:
+ self.error('No attributes requested for monitoring.')
+ return False
+ if not os.access(self.syspath, os.R_OK):
+ self.error('Unable to access {0}'.format(self.syspath))
+ return False
+ return self.create_charts()
+
+ def create_charts(self):
+ chartset = set(GET_CHART).intersection(set(self.types))
+ if not chartset:
+ self.error('No valid attributes requested for monitoring.')
+ return False
+ charts = dict()
+ attrlist = list()
+ for item in chartset:
+ chart, attrs = GET_CHART[item](self.syspath)
+ if chart is not None:
+ charts.update(chart)
+ attrlist.extend(attrs)
+ if len(charts) == 0:
+ self.error('No charts can be created.')
+ return False
+ self.definitions.update(charts)
+ self.order.extend(sorted(charts))
+ self.attrlist.extend(attrlist)
+ return True
+
+ def _get_data(self):
+ data = dict()
+ for attr in self.attrlist:
+ attrpath = os.path.join(self.syspath, attr)
+ if attr.endswith(('_min', '_min_design', '_empty', '_empty_design')):
+ data[attr] = get_sysfs_value(attrpath) or 0
+ else:
+ data[attr] = get_sysfs_value(attrpath)
+ return data
diff --git a/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
new file mode 100644
index 0000000..96eeef4
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
@@ -0,0 +1,79 @@
+# netdata python.d.plugin configuration for linux_power_supply
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above parameters, linux_power_supply also supports
+# the following extra parameters.
+#
+# supply: '' # the name of the power supply to monitor
+# charts: 'capacity' # a space separated list of the charts to try
+# # and generate. Valid charts are 'capacity',
+# # 'charge', 'energy', and 'voltage'
+#
+# Note that linux_power_supply will not automatically detect power
+# supplies in the system; you have to manually specify which ones you
+# want it to monitor.
+#
+# The following config will work to monitor the first battery in most
+# ACPI compliant battery powered systems (such as most laptops).
+#
+# battery:
+# name: battery
+# supply: BAT0
diff --git a/collectors/python.d.plugin/litespeed/Makefile.inc b/collectors/python.d.plugin/litespeed/Makefile.inc
new file mode 100644
index 0000000..5dd6450
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += litespeed/litespeed.chart.py
+dist_pythonconfig_DATA += litespeed/litespeed.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += litespeed/README.md litespeed/Makefile.inc
+
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
new file mode 100644
index 0000000..88b6725
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -0,0 +1,49 @@
+# litespeed
+
+This module monitors LiteSpeed web server performance metrics.
+
+It produces:
+
+1. **Network Throughput HTTP** in kilobits/s
+ * in
+ * out
+
+2. **Network Throughput HTTPS** in kilobits/s
+ * in
+ * out
+
+3. **Connections HTTP** in connections
+ * free
+ * used
+
+4. **Connections HTTPS** in connections
+ * free
+ * used
+
+5. **Requests** in requests/s
+ * requests
+
+6. **Requests In Processing** in requests
+ * processing
+
+7. **Public Cache Hits** in hits/s
+ * hits
+
+8. **Private Cache Hits** in hits/s
+ * hits
+
+9. **Static Hits** in hits/s
+ * hits
+
+
+### configuration
+```yaml
+local:
+ path : 'PATH'
+```
+
+If no configuration is given, the module will use `/tmp/lshttpd/`.
+
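+For reference, a minimal sketch of how a report line is parsed (the
+pattern mirrors the module's; the sample line is illustrative, not
+captured from a real server):
+
+```python
+import re
+
+# KEY: numeric-value pairs, several per line in the .rtreport files.
+RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
+
+line = 'BPS_IN: 0, BPS_OUT: 0, SSL_BPS_IN: 0, SSL_BPS_OUT: 0'
+
+print(dict(RE.findall(line)))
+# {'BPS_IN': '0', 'BPS_OUT': '0', 'SSL_BPS_IN': '0', 'SSL_BPS_OUT': '0'}
+```
+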
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flitespeed%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
new file mode 100644
index 0000000..9da9421
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+# Description: litespeed netdata python.d module
+# Author: Ilya Maschenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import glob
+import re
+import os
+
+from collections import namedtuple
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+update_every = 10
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'net_throughput_http', # net throughput
+ 'net_throughput_https', # net throughput
+ 'connections_http', # connections
+ 'connections_https', # connections
+ 'requests', # requests
+ 'requests_processing', # requests
+ 'pub_cache_hits', # cache
+ 'private_cache_hits', # cache
+ 'static_hits', # static
+]
+
+CHARTS = {
+ 'net_throughput_http': {
+ 'options': [None, 'Network Throughput HTTP', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['bps_in', 'in', 'absolute'],
+ ['bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'net_throughput_https': {
+ 'options': [None, 'Network Throughput HTTPS', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['ssl_bps_in', 'in', 'absolute'],
+ ['ssl_bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'connections_http': {
+ 'options': [None, 'Connections HTTP', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['conn_free', 'free', 'absolute'],
+ ['conn_used', 'used', 'absolute']
+ ]
+ },
+ 'connections_https': {
+ 'options': [None, 'Connections HTTPS', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['ssl_conn_free', 'free', 'absolute'],
+ ['ssl_conn_used', 'used', 'absolute']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'litespeed.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'absolute', 1, 100]
+ ]
+ },
+ 'requests_processing': {
+ 'options': [None, 'Requests In Processing', 'requests', 'requests', 'litespeed.requests_processing', 'line'],
+ 'lines': [
+ ['requests_processing', 'processing', 'absolute']
+ ]
+ },
+ 'pub_cache_hits': {
+ 'options': [None, 'Public Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['pub_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'private_cache_hits': {
+ 'options': [None, 'Private Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['private_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'static_hits': {
+ 'options': [None, 'Static Hits', 'hits/s', 'static', 'litespeed.static', 'line'],
+ 'lines': [
+ ['static_hits', 'hits', 'absolute', 1, 100]
+ ]
+ }
+}
+
+t = namedtuple('T', ['key', 'id', 'mul'])
+
+T = [
+ t('BPS_IN', 'bps_in', 8),
+ t('BPS_OUT', 'bps_out', 8),
+ t('SSL_BPS_IN', 'ssl_bps_in', 8),
+ t('SSL_BPS_OUT', 'ssl_bps_out', 8),
+ t('REQ_PER_SEC', 'requests', 100),
+ t('REQ_PROCESSING', 'requests_processing', 1),
+ t('PUB_CACHE_HITS_PER_SEC', 'pub_cache_hits', 100),
+ t('PRIVATE_CACHE_HITS_PER_SEC', 'private_cache_hits', 100),
+ t('STATIC_HITS_PER_SEC', 'static_hits', 100),
+ t('PLAINCONN', 'conn_used', 1),
+ t('AVAILCONN', 'conn_free', 1),
+ t('SSLCONN', 'ssl_conn_used', 1),
+ t('AVAILSSL', 'ssl_conn_free', 1),
+]
+
+RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
+
+ZERO_DATA = {
+ 'bps_in': 0,
+ 'bps_out': 0,
+ 'ssl_bps_in': 0,
+ 'ssl_bps_out': 0,
+ 'requests': 0,
+ 'requests_processing': 0,
+ 'pub_cache_hits': 0,
+ 'private_cache_hits': 0,
+ 'static_hits': 0,
+ 'conn_used': 0,
+ 'conn_free': 0,
+ 'ssl_conn_used': 0,
+ 'ssl_conn_free': 0,
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.path = self.configuration.get('path', '/tmp/lshttpd/')
+ self.files = list()
+
+ def check(self):
+ if not self.path:
+ self.error('"path" not specified')
+ return False
+
+ fs = glob.glob(os.path.join(self.path, '.rtreport*'))
+
+ if not fs:
+ self.error('"{0}" has no "rtreport" files or dir is not readable'.format(self.path))
+ return None
+
+ self.debug('stats files:', fs)
+
+ for f in fs:
+ if not is_readable_file(f):
+ self.error('{0} is not readable'.format(f))
+ continue
+ self.files.append(f)
+
+ return bool(self.files)
+
+ def get_data(self):
+ """
+ Parse data from the litespeed rtreport files
+ :return: dict
+ """
+ data = dict(ZERO_DATA)
+
+ for f in self.files:
+ try:
+ with open(f) as b:
+ lines = b.readlines()
+ except (OSError, IOError) as err:
+ self.error(err)
+ return None
+ else:
+ parse_file(data, lines)
+
+ return data
+
+
+def parse_file(data, lines):
+ for line in lines:
+ if not line.startswith(('BPS_IN:', 'MAXCONN:', 'REQ_RATE []:')):
+ continue
+ m = dict(RE.findall(line))
+ for v in T:
+ if v.key in m:
+ data[v.id] += float(m[v.key]) * v.mul
+
+
+def is_readable_file(v):
+ return os.path.isfile(v) and os.access(v, os.R_OK)
diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf
new file mode 100644
index 0000000..a326e18
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for litespeed
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, litespeed also supports the following:
+#
+# path: 'PATH' # path to the litespeed stats files directory
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ path : '/tmp/lshttpd/'
diff --git a/collectors/python.d.plugin/logind/Makefile.inc b/collectors/python.d.plugin/logind/Makefile.inc
new file mode 100644
index 0000000..adadab1
--- /dev/null
+++ b/collectors/python.d.plugin/logind/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += logind/logind.chart.py
+dist_pythonconfig_DATA += logind/logind.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += logind/README.md logind/Makefile.inc
+
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
new file mode 100644
index 0000000..c35630c
--- /dev/null
+++ b/collectors/python.d.plugin/logind/README.md
@@ -0,0 +1,56 @@
+# logind
+
+This module monitors active sessions, users, and seats tracked by systemd-logind or elogind.
+
+It provides the following charts:
+
+1. **Sessions** Tracks the total number of sessions.
+ * Graphical: Local graphical sessions (running X11, or Wayland, or something else).
+ * Console: Local console sessions.
+ * Remote: Remote sessions.
+
+2. **Users** Tracks the total number of unique user logins of each type.
+ * Graphical
+ * Console
+ * Remote
+
+3. **Seats** Total number of seats in use.
+ * Seats
+
+### configuration
+
+This module needs no configuration. Just make sure the netdata user
+can run the `loginctl` command and get a session list without having to
+specify a path.
+
+This will work with any command that can output data in the _exact_
+same format as `loginctl list-sessions --no-legend`. If you have some
+other command you want to use that outputs data in this format, you can
+specify it using the `command` key like so:
+
+```yaml
+command: '/path/to/other/command'
+```
+
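+As a sketch, the module distinguishes session types by the number of
+fields on each output line. The sample lines below are illustrative,
+not captured from a real system, and assume the
+`loginctl list-sessions --no-legend` column layout:
+
+```python
+# Classify loginctl-style lines by field count, as the module does:
+# 3 fields = remote, 4 = graphical, 5 = console.
+lines = [
+    '15 1000 alice',            # remote session (no seat)
+    'c2 1000 alice seat0',      # graphical session
+    '1 1000 alice seat0 tty2',  # console session
+]
+
+for line in lines:
+    fields = line.split()
+    kind = {3: 'remote', 4: 'graphical', 5: 'console'}[len(fields)]
+    print('session {0} (user {1}): {2}'.format(fields[0], fields[2], kind))
+```
+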
+### notes
+
+* This module's ability to track logins is dependent on what PAM services
+are configured to register sessions with logind. In particular, for
+most systems, it will only track TTY logins, local desktop logins,
+and logins through remote shell connections.
+
+* The users chart counts _usernames_, not UIDs. This is potentially
+important in configurations where multiple users have the same UID.
+
+* The users chart counts any given user name up to once for _each_ type
+of login. So if the same user has a graphical and a console login on a
+system, they will show up once in the graphical count, and once in the
+console count.
+
+* Because the data collection process is rather expensive, this plugin
+is currently disabled by default, and needs to be explicitly enabled in
+`/etc/netdata/python.d.conf` before it will run.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Flogind%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/logind/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py
new file mode 100644
index 0000000..7086686
--- /dev/null
+++ b/collectors/python.d.plugin/logind/logind.chart.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Description: logind netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+priority = 59999
+disabled_by_default = True
+
+LOGINCTL_COMMAND = 'loginctl list-sessions --no-legend'
+
+ORDER = [
+ 'sessions',
+ 'users',
+ 'seats',
+]
+
+CHARTS = {
+ 'sessions': {
+ 'options': [None, 'Logind Sessions', 'sessions', 'sessions', 'logind.sessions', 'stacked'],
+ 'lines': [
+ ['sessions_graphical', 'Graphical', 'absolute', 1, 1],
+ ['sessions_console', 'Console', 'absolute', 1, 1],
+ ['sessions_remote', 'Remote', 'absolute', 1, 1]
+ ]
+ },
+ 'users': {
+ 'options': [None, 'Logind Users', 'users', 'users', 'logind.users', 'stacked'],
+ 'lines': [
+ ['users_graphical', 'Graphical', 'absolute', 1, 1],
+ ['users_console', 'Console', 'absolute', 1, 1],
+ ['users_remote', 'Remote', 'absolute', 1, 1]
+ ]
+ },
+ 'seats': {
+ 'options': [None, 'Logind Seats', 'seats', 'seats', 'logind.seats', 'line'],
+ 'lines': [
+ ['seats', 'Active Seats', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = LOGINCTL_COMMAND
+
+ def _get_data(self):
+ ret = {
+ 'sessions_graphical': 0,
+ 'sessions_console': 0,
+ 'sessions_remote': 0,
+ }
+ users = {
+ 'graphical': list(),
+ 'console': list(),
+ 'remote': list()
+ }
+ seats = list()
+ data = self._get_raw_data()
+
+ if data is None:
+ return None
+
+ for item in data:
+ fields = item.split()
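+ # Classify by field count: 3 fields means no seat (remote),
+ # 4 means a seat without a TTY (graphical), and 5 means a seat
+ # plus a TTY (console).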
+ if len(fields) == 3:
+ users['remote'].append(fields[2])
+ ret['sessions_remote'] += 1
+ elif len(fields) == 4:
+ users['graphical'].append(fields[2])
+ ret['sessions_graphical'] += 1
+ seats.append(fields[3])
+ elif len(fields) == 5:
+ users['console'].append(fields[2])
+ ret['sessions_console'] += 1
+ seats.append(fields[3])
+
+ ret['users_graphical'] = len(set(users['graphical']))
+ ret['users_console'] = len(set(users['console']))
+ ret['users_remote'] = len(set(users['remote']))
+ ret['seats'] = len(set(seats))
+
+ return ret
diff --git a/collectors/python.d.plugin/logind/logind.conf b/collectors/python.d.plugin/logind/logind.conf
new file mode 100644
index 0000000..01a859d
--- /dev/null
+++ b/collectors/python.d.plugin/logind/logind.conf
@@ -0,0 +1,60 @@
+# netdata python.d.plugin configuration for logind
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
diff --git a/collectors/python.d.plugin/mdstat/Makefile.inc b/collectors/python.d.plugin/mdstat/Makefile.inc
new file mode 100644
index 0000000..5125a27
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mdstat/mdstat.chart.py
+dist_pythonconfig_DATA += mdstat/mdstat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mdstat/README.md mdstat/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mdstat/README.md b/collectors/python.d.plugin/mdstat/README.md
new file mode 100644
index 0000000..f88346e
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/README.md
@@ -0,0 +1,33 @@
+# mdstat
+
+> THIS MODULE IS OBSOLETE.
+> USE THE [PROC PLUGIN](../../proc.plugin) - IT IS MORE EFFICIENT
+
+---
+
+This module monitors `/proc/mdstat`.
+
+It produces:
+
+1. **Health** Number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+ * total (number of devices the array ideally would have)
+ * inuse (number of devices currently in use)
+
+3. **Current status**
+ * resync in percent
+ * recovery in percent
+ * reshape in percent
+ * check in percent
+
+4. **Operation status** (if resync/recovery/reshape/check is active)
+ * finish in minutes
+ * speed in megabytes/s
+
+### configuration
+No configuration is needed.
+
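+A minimal sketch of the `/proc/mdstat` parsing involved (the pattern
+mirrors the module's; the sample block is illustrative):
+
+```python
+import re
+
+# Mirrors the module's disk-state pattern. Note the leading space:
+# the module joins each array's lines into one space-prefixed string.
+RE_DISKS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
+                      r'(?P<total_disks>[0-9]+)/'
+                      r'(?P<inuse_disks>[0-9]+)\]')
+
+# Illustrative joined block for one array.
+block = ' md0 : active raid1 sdb1[1] sda1[0] 1046528 blocks [2/2] [UU]'
+
+m = RE_DISKS.search(block)
+print(m.groupdict())
+# {'array': 'md0', 'total_disks': '2', 'inuse_disks': '2'}
+```
+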
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmdstat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/mdstat/mdstat.chart.py b/collectors/python.d.plugin/mdstat/mdstat.chart.py
new file mode 100644
index 0000000..b7306b6
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/mdstat.chart.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# Description: mdstat netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from collections import defaultdict
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+MDSTAT = '/proc/mdstat'
+MISMATCH_CNT = '/sys/block/{0}/md/mismatch_cnt'
+
+ORDER = ['mdstat_health']
+
+CHARTS = {
+ 'mdstat_health': {
+ 'options': [None, 'Faulty Devices In MD', 'failed disks', 'health', 'md.health', 'line'],
+ 'lines': []
+ }
+}
+
+RE_DISKS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
+ r'(?P<total_disks>[0-9]+)/'
+ r'(?P<inuse_disks>[0-9]+)\]')
+
+RE_STATUS = re.compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
+ r'(?P<operation>[a-z]+) =[ ]{1,2}'
+ r'(?P<operation_status>[0-9.]+).+finish='
+ r'(?P<finish_in>([0-9.]+))min speed='
+ r'(?P<speed>[0-9]+)')
+
+
+def md_charts(name):
+ order = [
+ '{0}_disks'.format(name),
+ '{0}_operation'.format(name),
+ '{0}_mismatch_cnt'.format(name),
+ '{0}_finish'.format(name),
+ '{0}_speed'.format(name)
+ ]
+
+ charts = dict()
+ charts[order[0]] = {
+ 'options': [None, 'Disks Stats', 'disks', name, 'md.disks', 'stacked'],
+ 'lines': [
+ ['{0}_total_disks'.format(name), 'total', 'absolute'],
+ ['{0}_inuse_disks'.format(name), 'inuse', 'absolute']
+ ]
+ }
+
+ charts[order[1]] = {
+ 'options': [None, 'Current Status', 'percent', name, 'md.status', 'line'],
+ 'lines': [
+ ['{0}_resync'.format(name), 'resync', 'absolute', 1, 100],
+ ['{0}_recovery'.format(name), 'recovery', 'absolute', 1, 100],
+ ['{0}_reshape'.format(name), 'reshape', 'absolute', 1, 100],
+ ['{0}_check'.format(name), 'check', 'absolute', 1, 100],
+ ]
+ }
+
+ charts[order[2]] = {
+ 'options': [None, 'Mismatch Count', 'unsynchronized blocks', name, 'md.mismatch_cnt', 'line'],
+ 'lines': [
+ ['{0}_mismatch_cnt'.format(name), 'count', 'absolute']
+ ]
+ }
+
+ charts[order[3]] = {
+ 'options': [None, 'Approximate Time Until Finish', 'seconds', name, 'md.rate', 'line'],
+ 'lines': [
+ ['{0}_finish_in'.format(name), 'finish in', 'absolute', 1, 1000]
+ ]
+ }
+
+ charts[order[4]] = {
+ 'options': [None, 'Operation Speed', 'KB/s', name, 'md.rate', 'line'],
+ 'lines': [
+ ['{0}_speed'.format(name), 'speed', 'absolute', 1, 1000]
+ ]
+ }
+
+ return order, charts
+
+
+class MD:
+ def __init__(self, raw_data):
+ self.name = raw_data['array']
+ self.d = raw_data
+
+ def data(self):
+ rv = {
+ 'total_disks': self.d['total_disks'],
+ 'inuse_disks': self.d['inuse_disks'],
+ 'health': int(self.d['total_disks']) - int(self.d['inuse_disks']),
+ 'resync': 0,
+ 'recovery': 0,
+ 'reshape': 0,
+ 'check': 0,
+ 'finish_in': 0,
+ 'speed': 0,
+ }
+
+ v = read_lines(MISMATCH_CNT.format(self.name))
+ if v:
+ rv['mismatch_cnt'] = int(v[0])
+
+ if self.d.get('operation'):
+ rv[self.d['operation']] = float(self.d['operation_status']) * 100
+ rv['finish_in'] = float(self.d['finish_in']) * 1000 * 60
+ rv['speed'] = float(self.d['speed']) * 1000
+
+ return dict(('{0}_{1}'.format(self.name, k), v) for k, v in rv.items())
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.mds = list()
+
+ @staticmethod
+ def get_mds():
+ raw = read_lines(MDSTAT)
+
+ if not raw:
+ return None
+
+ return find_mds(raw)
+
+ def get_data(self):
+ """
+ Parse data from _get_raw_data()
+ :return: dict
+ """
+ mds = self.get_mds()
+
+ if not mds:
+ return None
+
+ data = dict()
+ for md in mds:
+ if md.name not in self.mds:
+ self.mds.append(md.name)
+ self.add_new_md_charts(md.name)
+ data.update(md.data())
+ return data
+
+ def check(self):
+ if not self.get_mds():
+ self.error('Failed to read data from {0} or there are no active arrays'.format(MDSTAT))
+ return False
+ return True
+
+ def add_new_md_charts(self, name):
+ order, charts = md_charts(name)
+
+ self.charts['mdstat_health'].add_dimension(['{0}_health'.format(name), name])
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dims = charts[chart_name]['lines']
+
+ chart = self.charts.add_chart(params)
+ for dim in dims:
+ chart.add_dimension(dim)
+
+
+def find_mds(raw_data):
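+ # /proc/mdstat separates arrays with blank lines; accumulate each
+ # array's lines into one space-joined, space-prefixed string keyed
+ # by a running block counter.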
+ data = defaultdict(str)
+ counter = 1
+
+ for row in (elem.strip() for elem in raw_data):
+ if not row:
+ counter += 1
+ continue
+ data[counter] = ' '.join([data[counter], row])
+
+ mds = list()
+
+ for v in data.values():
+ m = RE_DISKS.search(v)
+
+ if not m:
+ continue
+
+ d = m.groupdict()
+
+ m = RE_STATUS.search(v)
+ if m:
+ d.update(m.groupdict())
+
+ mds.append(MD(d))
+
+ return sorted(mds, key=lambda md: md.name)
+
+
+def read_lines(path):
+ try:
+ with open(path) as f:
+ return f.readlines()
+ except (IOError, OSError):
+ return None
diff --git a/collectors/python.d.plugin/mdstat/mdstat.conf b/collectors/python.d.plugin/mdstat/mdstat.conf
new file mode 100644
index 0000000..c72b638
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/mdstat.conf
@@ -0,0 +1,30 @@
+# netdata python.d.plugin configuration for mdstat
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
diff --git a/collectors/python.d.plugin/megacli/Makefile.inc b/collectors/python.d.plugin/megacli/Makefile.inc
new file mode 100644
index 0000000..83680d7
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += megacli/megacli.chart.py
+dist_pythonconfig_DATA += megacli/megacli.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += megacli/README.md megacli/Makefile.inc
+
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
new file mode 100644
index 0000000..e96015d
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -0,0 +1,50 @@
+# megacli
+
+This module collects adapter, physical drive, and battery stats.
+
+**Requirements:**
+ * `megacli` program
+ * `sudo` program
+ * `netdata` user needs to be able to sudo the `megacli` program without a password
+
+To grab stats it executes:
+ * `sudo -n megacli -LDPDInfo -aAll`
+ * `sudo -n megacli -AdpBbuCmd -a0`
+
+
+It produces:
+
+1. **Adapter State**
+
+2. **Physical Drives Media Errors**
+
+3. **Physical Drives Predictive Failures**
+
+4. **Battery Relative State of Charge**
+
+5. **Battery Cycle Count**
+
+### prerequisite
+This module uses `megacli`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `megacli` as root without a password.
+
+Add to `sudoers`:
+
+ netdata ALL=(root) NOPASSWD: /path/to/megacli
+
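+To verify the setup, the module's own startup check roughly amounts to
+the following sketch (run it as the `netdata` user; the module inspects
+stderr rather than the return code, but the idea is the same):
+
+```python
+import subprocess
+
+# 'sudo -n -v' fails instead of prompting when a password would be
+# required, which is how the module probes for passwordless sudo.
+rc = subprocess.call(['sudo', '-n', '-v'])
+print('passwordless sudo OK' if rc == 0 else 'sudo would ask for a password')
+```
+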
+### configuration
+
+**megacli** is disabled by default. It should be explicitly enabled in `python.d.conf`.
+```yaml
+megacli: yes
+```
+
+Battery stats are disabled by default. To enable them, modify `megacli.conf`:
+```yaml
+do_battery: yes
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmegacli%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
new file mode 100644
index 0000000..e1a05e4
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+# Description: megacli netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+
+disabled_by_default = True
+
+update_every = 5
+
+
+def adapter_charts(ads):
+ order = [
+ 'adapter_degraded',
+ ]
+
+ def dims(ad):
+ return [['adapter_{0}_degraded'.format(a.id), 'adapter {0}'.format(a.id)] for a in ad]
+
+ charts = {
+ 'adapter_degraded': {
+ 'options': [None, 'Adapter State', 'is degraded', 'adapter', 'megacli.adapter_degraded', 'line'],
+ 'lines': dims(ads)
+ },
+ }
+
+ return order, charts
+
+
+def pd_charts(pds):
+ order = [
+ 'pd_media_error',
+ 'pd_predictive_failure',
+ ]
+
+ def dims(k, pd):
+ return [['slot_{0}_{1}'.format(p.id, k), 'slot {0}'.format(p.id), 'incremental'] for p in pd]
+
+ charts = {
+ 'pd_media_error': {
+ 'options': [None, 'Physical Drives Media Errors', 'errors/s', 'pd', 'megacli.pd_media_error', 'line'],
+ 'lines': dims('media_error', pds)
+ },
+ 'pd_predictive_failure': {
+ 'options': [None, 'Physical Drives Predictive Failures', 'failures/s', 'pd',
+ 'megacli.pd_predictive_failure', 'line'],
+ 'lines': dims('predictive_failure', pds)
+ }
+ }
+
+ return order, charts
+
+
+def battery_charts(bats):
+ order = list()
+ charts = dict()
+
+ for b in bats:
+ order.append('bbu_{0}_relative_charge'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_relative_charge'.format(b.id): {
+ 'options': [None, 'Relative State of Charge', 'percentage', 'battery',
+ 'megacli.bbu_relative_charge', 'line'],
+ 'lines': [
+ ['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ for b in bats:
+ order.append('bbu_{0}_cycle_count'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_cycle_count'.format(b.id): {
+ 'options': [None, 'Cycle Count', 'cycle count', 'battery', 'megacli.bbu_cycle_count', 'line'],
+ 'lines': [
+ ['bbu_{0}_cycle_count'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ return order, charts
+
+
+RE_ADAPTER = re.compile(
+ r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z]+)'
+)
+
+RE_VD = re.compile(
+ r'Slot Number: ([0-9]+) Media Error Count: ([0-9]+) Predictive Failure Count: ([0-9]+)'
+)
+
+RE_BATTERY = re.compile(
+ r'BBU Capacity Info for Adapter: ([0-9]+) Relative State of Charge: ([0-9]+) % Cycle Count: ([0-9]+)'
+)
+
+
+def find_adapters(d):
+ keys = ('Adapter #', 'State')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [Adapter(*v) for v in RE_ADAPTER.findall(d)]
+
+
+def find_pds(d):
+ keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [PD(*v) for v in RE_VD.findall(d)]
+
+
+def find_batteries(d):
+ keys = ('BBU Capacity Info for Adapter', 'Relative State of Charge', 'Cycle Count')
+ d = ' '.join(v.strip() for v in d if v.strip().startswith(keys))
+ return [Battery(*v) for v in RE_BATTERY.findall(d)]
+
+
+class Adapter:
+ def __init__(self, n, state):
+ self.id = n
+ self.state = int(state == 'Degraded')
+
+ def data(self):
+ return {
+ 'adapter_{0}_degraded'.format(self.id): self.state,
+ }
+
+
+class PD:
+ def __init__(self, n, media_err, predict_fail):
+ self.id = n
+ self.media_err = media_err
+ self.predict_fail = predict_fail
+
+ def data(self):
+ return {
+ 'slot_{0}_media_error'.format(self.id): self.media_err,
+ 'slot_{0}_predictive_failure'.format(self.id): self.predict_fail,
+ }
+
+
+class Battery:
+ def __init__(self, adapt_id, rel_charge, cycle_count):
+ self.id = adapt_id
+ self.rel_charge = rel_charge
+ self.cycle_count = cycle_count
+
+ def data(self):
+ return {
+ 'bbu_{0}_relative_charge'.format(self.id): self.rel_charge,
+ 'bbu_{0}_cycle_count'.format(self.id): self.cycle_count,
+ }
+
+
+# TODO: hardcoded sudo...
+class Megacli:
+ def __init__(self):
+ self.s = find_binary('sudo')
+ self.m = find_binary('megacli')
+ self.sudo_check = [self.s, '-n', '-v']
+ self.disk_info = [self.s, '-n', self.m, '-LDPDInfo', '-aAll', '-NoLog']
+ self.battery_info = [self.s, '-n', self.m, '-AdpBbuCmd', '-a0', '-NoLog']
+
+ def __bool__(self):
+ return bool(self.s and self.m)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.do_battery = self.configuration.get('do_battery')
+ self.megacli = Megacli()
+
+ def check_sudo(self):
+ err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+ return True
+
+ def check_disk_info(self):
+ d = self._get_raw_data(command=self.megacli.disk_info)
+ if not d:
+ return False
+
+ ads = find_adapters(d)
+ pds = find_pds(d)
+
+ if not (ads and pds):
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.disk_info)))
+ return False
+
+ o, c = adapter_charts(ads)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ o, c = pd_charts(pds)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def check_battery(self):
+ d = self._get_raw_data(command=self.megacli.battery_info)
+ if not d:
+ return False
+
+ bats = find_batteries(d)
+
+ if not bats:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.battery_info)))
+ return False
+
+ o, c = battery_charts(bats)
+ self.order.extend(o)
+ self.definitions.update(c)
+ return True
+
+ def check(self):
+ if not self.megacli:
+ self.error('can\'t locate "sudo" or "megacli" binary')
+ return False
+
+ if not (self.check_sudo() and self.check_disk_info()):
+ return False
+
+ if self.do_battery:
+ self.do_battery = self.check_battery()
+
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ data.update(self.get_adapter_pd_data())
+
+ if self.do_battery:
+ data.update(self.get_battery_data())
+
+ return data or None
+
+ def get_adapter_pd_data(self):
+ raw = self._get_raw_data(command=self.megacli.disk_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for a in find_adapters(raw):
+ data.update(a.data())
+
+ for p in find_pds(raw):
+ data.update(p.data())
+
+ return data
+
+ def get_battery_data(self):
+ raw = self._get_raw_data(command=self.megacli.battery_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for b in find_batteries(raw):
+ data.update(b.data())
+
+ return data
diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf
new file mode 100644
index 0000000..1af4292
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.conf
@@ -0,0 +1,60 @@
+# netdata python.d.plugin configuration for megacli
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, megacli also supports the following:
+#
+# do_battery: yes/no # default is no. Battery stats (adds additional call to megacli `megacli -AdpBbuCmd -a0`).
+#
+# ----------------------------------------------------------------------
+# uncomment the line below to collect battery statistics
+# do_battery: yes
diff --git a/collectors/python.d.plugin/memcached/Makefile.inc b/collectors/python.d.plugin/memcached/Makefile.inc
new file mode 100644
index 0000000..e603571
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += memcached/memcached.chart.py
+dist_pythonconfig_DATA += memcached/memcached.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += memcached/README.md memcached/Makefile.inc
+
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
new file mode 100644
index 0000000..98627c4
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -0,0 +1,71 @@
+# memcached
+
+Memcached monitoring module. Data is grabbed from the [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
+
+1. **Network** in kilobytes/s
+ * read
+ * written
+
+2. **Connections** per second
+ * current
+ * rejected
+ * total
+
+3. **Items** in cluster
+ * current
+ * total
+
+4. **Evicted and Reclaimed** items
+ * evicted
+ * reclaimed
+
+5. **GET** requests/s
+ * hits
+ * misses
+
+6. **GET rate** rate in requests/s
+ * rate
+
+7. **SET rate** rate in requests/s
+ * rate
+
+8. **DELETE** requests/s
+ * hits
+ * misses
+
+9. **CAS** requests/s
+ * hits
+ * misses
+ * bad value
+
+10. **Increment** requests/s
+ * hits
+ * misses
+
+11. **Decrement** requests/s
+ * hits
+ * misses
+
+12. **Touch** requests/s
+ * hits
+ * misses
+
+13. **Touch rate** rate in requests/s
+ * rate
+
+### configuration
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+```
+
+If no configuration is given, the module will attempt to connect to a memcached instance at `127.0.0.1:11211`.
+
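+A minimal sketch of the exchange behind this module (assumes a reachable
+memcached on that address; the line parsing mirrors the module's):
+
+```python
+import socket
+
+# Send the 'stats' command and read until the terminating END marker.
+sock = socket.create_connection(('127.0.0.1', 11211), timeout=5)
+sock.sendall(b'stats\r\n')
+raw = b''
+while not raw.endswith(b'END\r\n'):
+    chunk = sock.recv(4096)
+    if not chunk:
+        break
+    raw += chunk
+sock.close()
+
+# Useful lines look like: STAT <name> <value>
+stats = {}
+for line in raw.decode().split('\r\n'):
+    if line.startswith('STAT '):
+        name, value = line[5:].split(' ', 1)
+        stats[name] = value
+
+print(stats.get('curr_connections'), stats.get('bytes'))
+```
+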
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmemcached%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
new file mode 100644
index 0000000..9803dbb
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+# Description: memcached netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+
+ORDER = [
+ 'cache',
+ 'net',
+ 'connections',
+ 'items',
+ 'evicted_reclaimed',
+ 'get',
+ 'get_rate',
+ 'set_rate',
+ 'cas',
+ 'delete',
+ 'increment',
+ 'decrement',
+ 'touch',
+ 'touch_rate',
+]
+
+CHARTS = {
+ 'cache': {
+ 'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'],
+ 'lines': [
+ ['avail', 'available', 'absolute', 1, 1 << 20],
+ ['used', 'used', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
+ 'lines': [
+ ['bytes_read', 'in', 'incremental', 8, 1000],
+ ['bytes_written', 'out', 'incremental', -8, 1000],
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
+ 'lines': [
+ ['curr_connections', 'current', 'incremental'],
+ ['rejected_connections', 'rejected', 'incremental'],
+ ['total_connections', 'total', 'incremental']
+ ]
+ },
+ 'items': {
+ 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
+ 'lines': [
+ ['curr_items', 'current', 'absolute'],
+ ['total_items', 'total', 'absolute']
+ ]
+ },
+ 'evicted_reclaimed': {
+ 'options': [None, 'Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
+ 'lines': [
+ ['reclaimed', 'reclaimed', 'absolute'],
+ ['evictions', 'evicted', 'absolute']
+ ]
+ },
+ 'get': {
+ 'options': [None, 'Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
+ 'lines': [
+ ['get_hits', 'hits', 'percent-of-absolute-row'],
+ ['get_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'get_rate': {
+ 'options': [None, 'Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
+ 'lines': [
+ ['cmd_get', 'rate', 'incremental']
+ ]
+ },
+ 'set_rate': {
+ 'options': [None, 'Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
+ 'lines': [
+ ['cmd_set', 'rate', 'incremental']
+ ]
+ },
+ 'delete': {
+ 'options': [None, 'Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
+ 'lines': [
+ ['delete_hits', 'hits', 'percent-of-absolute-row'],
+ ['delete_misses', 'misses', 'percent-of-absolute-row'],
+ ]
+ },
+ 'cas': {
+ 'options': [None, 'Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
+ 'lines': [
+ ['cas_hits', 'hits', 'percent-of-absolute-row'],
+ ['cas_misses', 'misses', 'percent-of-absolute-row'],
+ ['cas_badval', 'bad value', 'percent-of-absolute-row']
+ ]
+ },
+ 'increment': {
+ 'options': [None, 'Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
+ 'lines': [
+ ['incr_hits', 'hits', 'percent-of-absolute-row'],
+ ['incr_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'decrement': {
+ 'options': [None, 'Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
+ 'lines': [
+ ['decr_hits', 'hits', 'percent-of-absolute-row'],
+ ['decr_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'touch': {
+ 'options': [None, 'Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
+ 'lines': [
+ ['touch_hits', 'hits', 'percent-of-absolute-row'],
+ ['touch_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'touch_rate': {
+ 'options': [None, 'Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
+ 'lines': [
+ ['cmd_touch', 'rate', 'incremental']
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.request = 'stats\r\n'
+ self.host = 'localhost'
+ self.port = 11211
+ self._keep_alive = True
+ self.unix_socket = None
+
+ def _get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ response = self._get_raw_data()
+ if response is None:
+ # error has already been logged
+ return None
+
+ if response.startswith('ERROR'):
+ self.error('received ERROR')
+ return None
+
+ try:
+ parsed = response.split('\n')
+ except AttributeError:
+ self.error('response is invalid/empty')
+ return None
+
+ # split the response
+ data = {}
+ for line in parsed:
+ if line.startswith('STAT'):
+ try:
+ t = line[5:].split(' ')
+ data[t[0]] = t[1]
+ except (IndexError, ValueError):
+ self.debug('invalid line received: ' + str(line))
+
+ if not data:
+ self.error("received data doesn't have any records")
+ return None
+
+ # custom calculations
+ try:
+ data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])
+ data['used'] = int(data['bytes'])
+ except (KeyError, ValueError, TypeError):
+ pass
+
+ return data
+
+ def _check_raw_data(self, data):
+ if data.endswith('END\r\n'):
+ self.debug('received full response from memcached')
+ return True
+
+ self.debug('waiting for more data from memcached')
+ return False
+
+ def check(self):
+ """
+ Parse configuration, check if memcached is available
+ :return: boolean
+ """
+ self._parse_config()
+ data = self._get_data()
+ if data is None:
+ return False
+ return True
diff --git a/collectors/python.d.plugin/memcached/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf
new file mode 100644
index 0000000..3286b46
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/memcached.conf
@@ -0,0 +1,90 @@
+# netdata python.d.plugin configuration for memcached
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, memcached also supports the following:
+#
+# socket: 'path/to/memcached.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 11211
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 11211
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 11211
+
diff --git a/collectors/python.d.plugin/mongodb/Makefile.inc b/collectors/python.d.plugin/mongodb/Makefile.inc
new file mode 100644
index 0000000..784945a
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mongodb/mongodb.chart.py
+dist_pythonconfig_DATA += mongodb/mongodb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mongodb/README.md mongodb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
new file mode 100644
index 0000000..ac8930d
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -0,0 +1,170 @@
+# mongodb
+
+Module monitors MongoDB performance and health metrics.
+
+**Requirements:**
+ * `python-pymongo` package v2.4+.
+
+You need to install it manually.
+
+
+The number of charts depends on the MongoDB version, storage engine and enabled features (e.g. replication):
+
+1. **Read requests**:
+ * query
+ * getmore (operation the cursor executes to get additional data from a query)
+
+2. **Write requests**:
+ * insert
+ * delete
+ * update
+
+3. **Active clients**:
+ * readers (number of clients with read operations in progress or queued)
+ * writers (number of clients with write operations in progress or queued)
+
+4. **Journal transactions**:
+ * commits (count of transactions that have been written to the journal)
+
+5. **Data written to the journal**:
+ * volume (volume of data)
+
+6. **Background flush** (MMAPv1):
+ * average ms (average time taken by flushes to execute)
+ * last ms (time taken by the last flush)
+
+7. **Read tickets** (WiredTiger):
+ * in use (number of read tickets in use)
+ * available (number of available read tickets remaining)
+
+8. **Write tickets** (WiredTiger):
+ * in use (number of write tickets in use)
+ * available (number of available write tickets remaining)
+
+9. **Cursors**:
+ * opened (number of cursors currently opened by MongoDB for clients)
+ * timedOut (number of cursors that have timed out)
+ * noTimeout (number of open cursors with timeout disabled)
+
+10. **Connections**:
+ * connected (number of clients currently connected to the database server)
+ * unused (number of unused connections available for new clients)
+
+11. **Memory usage metrics**:
+ * virtual
+ * resident (amount of memory used by the database process)
+ * mapped
+ * non mapped
+
+12. **Page faults**:
+ * page faults (number of times MongoDB had to fetch data from disk)
+
+13. **Cache metrics** (WiredTiger):
+ * percentage of bytes currently in the cache (amount of space taken by cached data)
+ * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+
+14. **Pages evicted from cache** (WiredTiger):
+ * modified
+ * unmodified
+
+15. **Queued requests**:
+ * readers (number of read requests currently queued)
+ * writers (number of write requests currently queued)
+
+16. **Errors**:
+ * msg (number of message assertions raised)
+ * warning (number of warning assertions raised)
+ * regular (number of regular assertions raised)
+ * user (number of assertions corresponding to errors generated by users)
+
+17. **Storage metrics** (one chart for every database)
+ * dataSize (size of all documents + padding in the database)
+ * indexSize (size of all indexes in the database)
+ * storageSize (size of all extents in the database)
+
+18. **Documents in the database** (one chart for all databases)
+ * documents (number of objects in the database among all the collections)
+
+19. **tcmalloc metrics**
+ * central cache free
+ * current total thread cache
+ * pageheap free
+ * pageheap unmapped
+ * thread cache free
+ * transfer cache free
+ * heap size
+
+20. **Commands total/failed rate**
+ * count
+ * createIndexes
+ * delete
+ * eval
+ * findAndModify
+ * insert
+ * update
+
+21. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
+ * Global lock
+ * Database lock
+ * Collection lock
+ * Metadata lock
+ * oplog lock
+
+22. **Replica set members state**
+ * state
+
+23. **Oplog window**
+ * window (interval of time between the oldest and the latest entries in the oplog)
+
+24. **Replication lag**
+ * member (time when last entry from the oplog was applied for every member)
+
+25. **Replica set member heartbeat latency**
+ * member (time when the last heartbeat was received from a replica set member)
+
+### prerequisite
+
+Create a read-only user for netdata in the admin database.
+
+1. Authenticate as the admin user.
+
+```
+use admin
+db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")
+```
+
+2. Create a user.
+
+```
+# MongoDB 2.x.
+db.addUser("netdata", "<UNIQUE_PASSWORD>", true)
+
+# MongoDB 3.x or higher.
+db.createUser({
+ "user":"netdata",
+ "pwd": "<UNIQUE_PASSWORD>",
+ "roles" : [
+ {role: 'read', db: 'admin' },
+ {role: 'clusterMonitor', db: 'admin'},
+ {role: 'read', db: 'local' }
+ ]
+})
+```
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 27017
+ user : 'netdata'
+ pass : 'netdata'
+
+```
+
+If no configuration is given, the module will attempt to connect to the mongodb daemon at `127.0.0.1:27017`.
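+
+To sanity-check connectivity and credentials before enabling the job, a minimal
+`pymongo` sketch (using the sample values above; note that `authenticate()`
+exists in pymongo 2.x/3.x but was removed in 4.0) might look like this:
+
+```python
+from pymongo import MongoClient
+from pymongo.errors import PyMongoError
+
+client = MongoClient(host='127.0.0.1', port=27017, serverselectiontimeoutms=100)
+try:
+    client.admin.authenticate(name='netdata', password='netdata')
+    # serverStatus is the same command the module polls on every update
+    print('uptime:', client.admin.command('serverStatus')['uptime'])
+except PyMongoError as error:
+    print('cannot connect:', error)
+```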
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
new file mode 100644
index 0000000..92740ff
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
@@ -0,0 +1,727 @@
+# -*- coding: utf-8 -*-
+# Description: mongodb netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from copy import deepcopy
+from datetime import datetime
+from sys import exc_info
+
+try:
+ from pymongo import MongoClient, ASCENDING, DESCENDING
+ from pymongo.errors import PyMongoError
+ PYMONGO = True
+except ImportError:
+ PYMONGO = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+REPL_SET_STATES = [
+ ('1', 'primary'),
+ ('8', 'down'),
+ ('2', 'secondary'),
+ ('3', 'recovering'),
+ ('5', 'startup2'),
+ ('4', 'fatal'),
+ ('7', 'arbiter'),
+ ('6', 'unknown'),
+ ('9', 'rollback'),
+ ('10', 'removed'),
+ ('0', 'startup')
+]
+
+
+def multiply_by_100(value):
+ return value * 100
+
+
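+# each entry is a (dotted path in serverStatus, dimension rename or None,
+# post-processing function or None) tuple; _get_data() walks the dotted path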
+DEFAULT_METRICS = [
+ ('opcounters.delete', None, None),
+ ('opcounters.update', None, None),
+ ('opcounters.insert', None, None),
+ ('opcounters.query', None, None),
+ ('opcounters.getmore', None, None),
+ ('globalLock.activeClients.readers', 'activeClients_readers', None),
+ ('globalLock.activeClients.writers', 'activeClients_writers', None),
+ ('connections.available', 'connections_available', None),
+ ('connections.current', 'connections_current', None),
+ ('mem.mapped', None, None),
+ ('mem.resident', None, None),
+ ('mem.virtual', None, None),
+ ('globalLock.currentQueue.readers', 'currentQueue_readers', None),
+ ('globalLock.currentQueue.writers', 'currentQueue_writers', None),
+ ('asserts.msg', None, None),
+ ('asserts.regular', None, None),
+ ('asserts.user', None, None),
+ ('asserts.warning', None, None),
+ ('extra_info.page_faults', None, None),
+ ('metrics.record.moves', None, None),
+ ('backgroundFlushing.average_ms', None, multiply_by_100),
+ ('backgroundFlushing.last_ms', None, multiply_by_100),
+    ('backgroundFlushing.flushes', None, None),
+ ('metrics.cursor.timedOut', None, None),
+ ('metrics.cursor.open.total', 'cursor_total', None),
+ ('metrics.cursor.open.noTimeout', None, None),
+ ('cursors.timedOut', None, None),
+ ('cursors.totalOpen', 'cursor_total', None)
+]
+
+DUR = [
+ ('dur.commits', None, None),
+ ('dur.journaledMB', None, multiply_by_100)
+]
+
+WIREDTIGER = [
+ ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None),
+ ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None),
+ ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None),
+ ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None),
+ ('wiredTiger.cache.bytes currently in the cache', None, None),
+ ('wiredTiger.cache.tracked dirty bytes in the cache', None, None),
+ ('wiredTiger.cache.maximum bytes configured', None, None),
+ ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None),
+ ('wiredTiger.cache.modified pages evicted', 'modified', None)
+]
+
+TCMALLOC = [
+ ('tcmalloc.generic.current_allocated_bytes', None, None),
+ ('tcmalloc.generic.heap_size', None, None),
+ ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None),
+ ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None),
+ ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None),
+ ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None)
+]
+
+COMMANDS = [
+ ('metrics.commands.count.total', 'count_total', None),
+ ('metrics.commands.createIndexes.total', 'createIndexes_total', None),
+ ('metrics.commands.delete.total', 'delete_total', None),
+ ('metrics.commands.eval.total', 'eval_total', None),
+ ('metrics.commands.findAndModify.total', 'findAndModify_total', None),
+ ('metrics.commands.insert.total', 'insert_total', None),
+    ('metrics.commands.update.total', 'update_total', None),
+ ('metrics.commands.count.failed', 'count_failed', None),
+ ('metrics.commands.createIndexes.failed', 'createIndexes_failed', None),
+ ('metrics.commands.delete.failed', 'delete_failed', None),
+ ('metrics.commands.eval.failed', 'eval_failed', None),
+ ('metrics.commands.findAndModify.failed', 'findAndModify_failed', None),
+ ('metrics.commands.insert.failed', 'insert_failed', None),
+    ('metrics.commands.update.failed', 'update_failed', None)
+]
+
+LOCKS = [
+ ('locks.Collection.acquireCount.R', 'Collection_R', None),
+ ('locks.Collection.acquireCount.r', 'Collection_r', None),
+ ('locks.Collection.acquireCount.W', 'Collection_W', None),
+ ('locks.Collection.acquireCount.w', 'Collection_w', None),
+ ('locks.Database.acquireCount.R', 'Database_R', None),
+ ('locks.Database.acquireCount.r', 'Database_r', None),
+ ('locks.Database.acquireCount.W', 'Database_W', None),
+ ('locks.Database.acquireCount.w', 'Database_w', None),
+ ('locks.Global.acquireCount.R', 'Global_R', None),
+ ('locks.Global.acquireCount.r', 'Global_r', None),
+ ('locks.Global.acquireCount.W', 'Global_W', None),
+ ('locks.Global.acquireCount.w', 'Global_w', None),
+ ('locks.Metadata.acquireCount.R', 'Metadata_R', None),
+ ('locks.Metadata.acquireCount.w', 'Metadata_w', None),
+ ('locks.oplog.acquireCount.r', 'oplog_r', None),
+ ('locks.oplog.acquireCount.w', 'oplog_w', None)
+]
+
+DBSTATS = [
+ 'dataSize',
+ 'indexSize',
+ 'storageSize',
+ 'objects'
+]
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'read_operations',
+ 'write_operations',
+ 'active_clients',
+ 'journaling_transactions',
+ 'journaling_volume',
+ 'background_flush_average',
+ 'background_flush_last',
+ 'background_flush_rate',
+ 'wiredtiger_read',
+ 'wiredtiger_write',
+ 'cursors',
+ 'connections',
+ 'memory',
+ 'page_faults',
+ 'queued_requests',
+ 'record_moves',
+ 'wiredtiger_cache',
+ 'wiredtiger_pages_evicted',
+ 'asserts',
+ 'locks_collection',
+ 'locks_database',
+ 'locks_global',
+ 'locks_metadata',
+ 'locks_oplog',
+ 'dbstats_objects',
+ 'tcmalloc_generic',
+ 'tcmalloc_metrics',
+ 'command_total_rate',
+ 'command_failed_rate'
+]
+
+CHARTS = {
+ 'read_operations': {
+ 'options': [None, 'Received read requests', 'requests/s', 'throughput metrics',
+ 'mongodb.read_operations', 'line'],
+ 'lines': [
+ ['query', None, 'incremental'],
+ ['getmore', None, 'incremental']
+ ]
+ },
+ 'write_operations': {
+ 'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
+ 'mongodb.write_operations', 'line'],
+ 'lines': [
+ ['insert', None, 'incremental'],
+ ['update', None, 'incremental'],
+ ['delete', None, 'incremental']
+ ]
+ },
+ 'active_clients': {
+ 'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
+ 'throughput metrics', 'mongodb.active_clients', 'line'],
+ 'lines': [
+ ['activeClients_readers', 'readers', 'absolute'],
+ ['activeClients_writers', 'writers', 'absolute']
+ ]
+ },
+ 'journaling_transactions': {
+ 'options': [None, 'Transactions that have been written to the journal', 'commits',
+ 'database performance', 'mongodb.journaling_transactions', 'line'],
+ 'lines': [
+ ['commits', None, 'absolute']
+ ]
+ },
+ 'journaling_volume': {
+ 'options': [None, 'Volume of data written to the journal', 'MiB', 'database performance',
+ 'mongodb.journaling_volume', 'line'],
+ 'lines': [
+ ['journaledMB', 'volume', 'absolute', 1, 100]
+ ]
+ },
+ 'background_flush_average': {
+ 'options': [None, 'Average time taken by flushes to execute', 'milliseconds', 'database performance',
+ 'mongodb.background_flush_average', 'line'],
+ 'lines': [
+ ['average_ms', 'time', 'absolute', 1, 100]
+ ]
+ },
+ 'background_flush_last': {
+ 'options': [None, 'Time taken by the last flush operation to execute', 'milliseconds', 'database performance',
+ 'mongodb.background_flush_last', 'line'],
+ 'lines': [
+ ['last_ms', 'time', 'absolute', 1, 100]
+ ]
+ },
+ 'background_flush_rate': {
+ 'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
+ 'lines': [
+ ['flushes', 'flushes', 'incremental', 1, 1]
+ ]
+ },
+ 'wiredtiger_read': {
+ 'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
+ 'mongodb.wiredtiger_read', 'stacked'],
+ 'lines': [
+ ['wiredTigerRead_available', 'available', 'absolute', 1, 1],
+ ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
+ ]
+ },
+ 'wiredtiger_write': {
+ 'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
+ 'mongodb.wiredtiger_write', 'stacked'],
+ 'lines': [
+ ['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
+ ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
+ ]
+ },
+ 'cursors': {
+        'options': [None, 'Currently opened cursors, cursors with timeout disabled and timed out cursors',
+                    'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
+        'lines': [
+            ['cursor_total', 'opened', 'absolute', 1, 1],
+ ['noTimeout', None, 'absolute', 1, 1],
+ ['timedOut', None, 'incremental', 1, 1]
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Currently connected clients and unused connections', 'connections',
+ 'resource utilization', 'mongodb.connections', 'stacked'],
+ 'lines': [
+ ['connections_available', 'unused', 'absolute', 1, 1],
+ ['connections_current', 'connected', 'absolute', 1, 1]
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory metrics', 'MiB', 'resource utilization', 'mongodb.memory', 'stacked'],
+ 'lines': [
+ ['virtual', None, 'absolute', 1, 1],
+ ['resident', None, 'absolute', 1, 1],
+ ['nonmapped', None, 'absolute', 1, 1],
+ ['mapped', None, 'absolute', 1, 1]
+ ]
+ },
+ 'page_faults': {
+ 'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s',
+ 'resource utilization', 'mongodb.page_faults', 'line'],
+ 'lines': [
+ ['page_faults', None, 'incremental', 1, 1]
+ ]
+ },
+ 'queued_requests': {
+ 'options': [None, 'Currently queued read and write requests', 'requests', 'resource saturation',
+ 'mongodb.queued_requests', 'line'],
+ 'lines': [
+ ['currentQueue_readers', 'readers', 'absolute', 1, 1],
+ ['currentQueue_writers', 'writers', 'absolute', 1, 1]
+ ]
+ },
+ 'record_moves': {
+ 'options': [None, 'Number of times documents had to be moved on-disk', 'number',
+ 'resource saturation', 'mongodb.record_moves', 'line'],
+ 'lines': [
+ ['moves', None, 'incremental', 1, 1]
+ ]
+ },
+ 'asserts': {
+ 'options': [
+ None,
+            'Number of message, warning, regular and user (errors generated by users) assertions raised',
+ 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
+ 'lines': [
+ ['msg', None, 'incremental', 1, 1],
+ ['warning', None, 'incremental', 1, 1],
+ ['regular', None, 'incremental', 1, 1],
+ ['user', None, 'incremental', 1, 1]
+ ]
+ },
+ 'wiredtiger_cache': {
+        'options': [None, 'The percentage of the WiredTiger cache in use and the percentage taken by dirty data',
+ 'percentage', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
+ 'lines': [
+ ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
+ ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
+ ]
+ },
+ 'wiredtiger_pages_evicted': {
+ 'options': [None, 'Pages evicted from the cache',
+ 'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
+ 'lines': [
+ ['unmodified', None, 'absolute', 1, 1],
+ ['modified', None, 'absolute', 1, 1]
+ ]
+ },
+ 'dbstats_objects': {
+ 'options': [None, 'Number of documents in the database among all the collections', 'documents',
+ 'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
+ 'lines': []
+ },
+ 'tcmalloc_generic': {
+ 'options': [None, 'Tcmalloc generic metrics', 'MiB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
+ 'lines': [
+ ['current_allocated_bytes', 'allocated', 'absolute', 1, 1 << 20],
+ ['heap_size', 'heap_size', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'tcmalloc_metrics': {
+ 'options': [None, 'Tcmalloc metrics', 'KiB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
+ 'lines': [
+ ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
+ ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
+ ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024],
+ ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
+ ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
+ ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
+ ]
+ },
+ 'command_total_rate': {
+ 'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
+ 'lines': [
+ ['count_total', 'count', 'incremental', 1, 1],
+ ['createIndexes_total', 'createIndexes', 'incremental', 1, 1],
+ ['delete_total', 'delete', 'incremental', 1, 1],
+ ['eval_total', 'eval', 'incremental', 1, 1],
+ ['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
+ ['insert_total', 'insert', 'incremental', 1, 1],
+ ['update_total', 'update', 'incremental', 1, 1]
+ ]
+ },
+ 'command_failed_rate': {
+ 'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
+ 'lines': [
+ ['count_failed', 'count', 'incremental', 1, 1],
+ ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1],
+ ['delete_failed', 'delete', 'incremental', 1, 1],
+ ['eval_failed', 'eval', 'incremental', 1, 1],
+ ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
+ ['insert_failed', 'insert', 'incremental', 1, 1],
+ ['update_failed', 'update', 'incremental', 1, 1]
+ ]
+ },
+ 'locks_collection': {
+ 'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
+ 'lines': [
+ ['Collection_R', 'shared', 'incremental'],
+ ['Collection_W', 'exclusive', 'incremental'],
+ ['Collection_r', 'intent_shared', 'incremental'],
+ ['Collection_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_database': {
+ 'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
+ 'lines': [
+ ['Database_R', 'shared', 'incremental'],
+ ['Database_W', 'exclusive', 'incremental'],
+ ['Database_r', 'intent_shared', 'incremental'],
+ ['Database_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_global': {
+ 'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
+ 'lines': [
+ ['Global_R', 'shared', 'incremental'],
+ ['Global_W', 'exclusive', 'incremental'],
+ ['Global_r', 'intent_shared', 'incremental'],
+ ['Global_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_metadata': {
+ 'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
+ 'lines': [
+ ['Metadata_R', 'shared', 'incremental'],
+ ['Metadata_w', 'intent_exclusive', 'incremental']
+ ]
+ },
+ 'locks_oplog': {
+ 'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
+ 'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
+ 'lines': [
+ ['oplog_r', 'intent_shared', 'incremental'],
+ ['oplog_w', 'intent_exclusive', 'incremental']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER[:]
+ self.definitions = deepcopy(CHARTS)
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 27017)
+ self.timeout = self.configuration.get('timeout', 100)
+ self.metrics_to_collect = deepcopy(DEFAULT_METRICS)
+ self.connection = None
+ self.do_replica = None
+ self.databases = list()
+
+ def check(self):
+ if not PYMONGO:
+ self.error('Pymongo package v2.4+ is needed to use mongodb.chart.py')
+ return False
+ self.connection, server_status, error = self._create_connection()
+ if error:
+ self.error(error)
+ return False
+
+ self.build_metrics_to_collect_(server_status)
+
+ try:
+ data = self._get_data()
+ except (LookupError, SyntaxError, AttributeError):
+ self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
+ return False
+ if isinstance(data, dict) and data:
+ self._data_from_check = data
+ self.create_charts_(server_status)
+ return True
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
+
+ def build_metrics_to_collect_(self, server_status):
+
+ self.do_replica = 'repl' in server_status
+ if 'dur' in server_status:
+ self.metrics_to_collect.extend(DUR)
+ if 'tcmalloc' in server_status:
+ self.metrics_to_collect.extend(TCMALLOC)
+ if 'commands' in server_status['metrics']:
+ self.metrics_to_collect.extend(COMMANDS)
+ if 'wiredTiger' in server_status:
+ self.metrics_to_collect.extend(WIREDTIGER)
+ if 'Collection' in server_status['locks']:
+ self.metrics_to_collect.extend(LOCKS)
+
+ def create_charts_(self, server_status):
+
+ if 'dur' not in server_status:
+ self.order.remove('journaling_transactions')
+ self.order.remove('journaling_volume')
+
+ if 'backgroundFlushing' not in server_status:
+ self.order.remove('background_flush_average')
+ self.order.remove('background_flush_last')
+ self.order.remove('background_flush_rate')
+
+ if 'wiredTiger' not in server_status:
+ self.order.remove('wiredtiger_write')
+ self.order.remove('wiredtiger_read')
+ self.order.remove('wiredtiger_cache')
+
+ if 'tcmalloc' not in server_status:
+ self.order.remove('tcmalloc_generic')
+ self.order.remove('tcmalloc_metrics')
+
+ if 'commands' not in server_status['metrics']:
+ self.order.remove('command_total_rate')
+ self.order.remove('command_failed_rate')
+
+ if 'Collection' not in server_status['locks']:
+ self.order.remove('locks_collection')
+ self.order.remove('locks_database')
+ self.order.remove('locks_global')
+ self.order.remove('locks_metadata')
+
+ if 'oplog' not in server_status['locks']:
+ self.order.remove('locks_oplog')
+
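+        # add one "<db>_dbstats" chart per discovered database, plus a
+        # dimension on the shared dbstats_objects chart for each of them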
+ for dbase in self.databases:
+ self.order.append('_'.join([dbase, 'dbstats']))
+ self.definitions['_'.join([dbase, 'dbstats'])] = {
+ 'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KB',
+ 'storage size metrics', 'mongodb.dbstats', 'line'],
+ 'lines': [
+ ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024],
+ ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024],
+ ['_'.join([dbase, 'storageSize']), 'extents', 'absolute', 1, 1024]
+ ]}
+ self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute'])
+
+ if self.do_replica:
+ def create_lines(hosts, string):
+ lines = list()
+ for host in hosts:
+ dim_id = '_'.join([host, string])
+ lines.append([dim_id, host, 'absolute', 1, 1000])
+ return lines
+
+ def create_state_lines(states):
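+                # note: 'host' is resolved from the enclosing scope at call
+                # time, so this helper must only be called inside the
+                # 'for host in all_hosts' loop below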
+ lines = list()
+ for state, description in states:
+ dim_id = '_'.join([host, 'state', state])
+ lines.append([dim_id, description, 'absolute', 1, 1])
+ return lines
+
+ all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list())
+ this_host = server_status['repl']['me']
+ other_hosts = [host for host in all_hosts if host != this_host]
+
+ if 'local' in self.databases:
+ self.order.append('oplog_window')
+ self.definitions['oplog_window'] = {
+ 'options': [None, 'Interval of time between the oldest and the latest entries in the oplog',
+ 'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'],
+ 'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]}
+ # Create "heartbeat delay" chart
+ self.order.append('heartbeat_delay')
+ self.definitions['heartbeat_delay'] = {
+ 'options': [
+ None,
+ 'Time when last heartbeat was received from the replica set member (lastHeartbeatRecv)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
+ 'lines': create_lines(other_hosts, 'heartbeat_lag')}
+ # Create "optimedate delay" chart
+ self.order.append('optimedate_delay')
+ self.definitions['optimedate_delay'] = {
+ 'options': [None, 'Time when last entry from the oplog was applied (optimeDate)',
+ 'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'],
+ 'lines': create_lines(all_hosts, 'optimedate')}
+ # Create "replica set members state" chart
+ for host in all_hosts:
+ chart_name = '_'.join([host, 'state'])
+ self.order.append(chart_name)
+ self.definitions[chart_name] = {
+ 'options': [None, 'Replica set member (%s) current state' % host, 'state',
+ 'replication and oplog', 'mongodb.replication_state', 'line'],
+ 'lines': create_state_lines(REPL_SET_STATES)}
+
+ def _get_raw_data(self):
+ raw_data = dict()
+
+ raw_data.update(self.get_server_status() or dict())
+ raw_data.update(self.get_db_stats() or dict())
+ raw_data.update(self.get_repl_set_get_status() or dict())
+ raw_data.update(self.get_get_replication_info() or dict())
+
+ return raw_data or None
+
+ def get_server_status(self):
+ raw_data = dict()
+ try:
+ raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
+ except PyMongoError:
+ return None
+ else:
+ return raw_data
+
+ def get_db_stats(self):
+ if not self.databases:
+ return None
+
+ raw_data = dict()
+ raw_data['dbStats'] = dict()
+ try:
+ for dbase in self.databases:
+ raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
+ return raw_data
+ except PyMongoError:
+ return None
+
+ def get_repl_set_get_status(self):
+ if not self.do_replica:
+ return None
+
+ raw_data = dict()
+ try:
+ raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
+ return raw_data
+ except PyMongoError:
+ return None
+
+ def get_get_replication_info(self):
+ if not (self.do_replica and 'local' in self.databases):
+ return None
+
+ raw_data = dict()
+ raw_data['getReplicationInfo'] = dict()
+ try:
+ raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
+ '$natural', ASCENDING).limit(1)[0]
+ raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
+ '$natural', DESCENDING).limit(1)[0]
+ return raw_data
+ except PyMongoError:
+ return None
+
+ def _get_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+
+ if not raw_data:
+ return None
+
+ to_netdata = dict()
+ serverStatus = raw_data['serverStatus']
+ dbStats = raw_data.get('dbStats')
+ replSetGetStatus = raw_data.get('replSetGetStatus')
+ getReplicationInfo = raw_data.get('getReplicationInfo')
+ utc_now = datetime.utcnow()
+
+ # serverStatus
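+        # walk the dotted path into the serverStatus document; on a missing
+        # key the loop breaks early and the non-leaf value is discarded by
+        # the isinstance() check below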
+ for metric, new_name, func in self.metrics_to_collect:
+ value = serverStatus
+ for key in metric.split('.'):
+ try:
+ value = value[key]
+ except KeyError:
+ break
+
+ if not isinstance(value, dict) and key:
+ to_netdata[new_name or key] = value if not func else func(value)
+
+ to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal',
+ to_netdata['mapped'])
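+        # cache utilisation is stored as percent * 1000; the wiredtiger_cache
+        # chart definitions divide by 1000 to render three decimal places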
+ if to_netdata.get('maximum bytes configured'):
+ maximum = to_netdata['maximum bytes configured']
+ to_netdata['wiredTiger_percent_clean'] = int(to_netdata['bytes currently in the cache']
+ * 100 / maximum * 1000)
+ to_netdata['wiredTiger_percent_dirty'] = int(to_netdata['tracked dirty bytes in the cache']
+ * 100 / maximum * 1000)
+
+ # dbStats
+ if dbStats:
+ for dbase in dbStats:
+ for metric in DBSTATS:
+ key = '_'.join([dbase, metric])
+ to_netdata[key] = dbStats[dbase][metric]
+
+ # replSetGetStatus
+ if replSetGetStatus:
+ other_hosts = list()
+ members = replSetGetStatus['members']
+ unix_epoch = datetime(1970, 1, 1, 0, 0)
+
+ for member in members:
+ if not member.get('self'):
+ other_hosts.append(member)
+ # Replica set time diff between current time and time when last entry from the oplog was applied
+ if member.get('optimeDate', unix_epoch) != unix_epoch:
+ member_optimedate = member['name'] + '_optimedate'
+ to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'],
+ multiplier=1000))})
+ # Replica set members state
+ member_state = member['name'] + '_state'
+ for elem in REPL_SET_STATES:
+ state = elem[0]
+ to_netdata.update({'_'.join([member_state, state]): 0})
+ to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']})
+ # Heartbeat lag calculation
+ for other in other_hosts:
+ if other['lastHeartbeatRecv'] != unix_epoch:
+ node = other['name'] + '_heartbeat_lag'
+ to_netdata[node] = int(delta_calculation(delta=utc_now - other['lastHeartbeatRecv'],
+ multiplier=1000))
+
+ if getReplicationInfo:
+ first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime()
+ last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime()
+ to_netdata['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000))
+
+ return to_netdata
+
+ def _create_connection(self):
+ conn_vars = {'host': self.host, 'port': self.port}
+ if hasattr(MongoClient, 'server_selection_timeout'):
+ conn_vars.update({'serverselectiontimeoutms': self.timeout})
+ try:
+ connection = MongoClient(**conn_vars)
+ if self.user and self.password:
+ connection.admin.authenticate(name=self.user, password=self.password)
+ # elif self.user:
+ # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509')
+ server_status = connection.admin.command('serverStatus')
+ except PyMongoError as error:
+ return None, None, str(error)
+ else:
+ try:
+ self.databases = connection.database_names()
+ except PyMongoError as error:
+ self.info('Can\'t collect databases: %s' % str(error))
+ return connection, server_status, None
+
+
+def delta_calculation(delta, multiplier=1):
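+    # timedelta.total_seconds() appeared in Python 2.7; the fallback computes
+    # the same value manually for older interpreters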
+ if hasattr(delta, 'total_seconds'):
+ return delta.total_seconds() * multiplier
+ return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf
new file mode 100644
index 0000000..f69acac
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/mongodb.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for mongodb
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, mongodb also supports the following:
+#
+# host: 'IP or HOSTNAME' # type <str> the host to connect to
+# port: PORT # type <int> the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the mongodb username to use
+# pass: 'password' # the mongodb password to use
+#
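+# Example of an authenticated job (host and credentials are placeholders):
+#
+# remote:
+#  name : 'remote'
+#  host : '10.11.12.13'
+#  port : 27017
+#  user : 'netdata'
+#  pass : 'netdata'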
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+# to connect to mongodb on localhost, without a password:
+
+local:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 27017
diff --git a/collectors/python.d.plugin/monit/Makefile.inc b/collectors/python.d.plugin/monit/Makefile.inc
new file mode 100644
index 0000000..4a3673f
--- /dev/null
+++ b/collectors/python.d.plugin/monit/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += monit/monit.chart.py
+dist_pythonconfig_DATA += monit/monit.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += monit/README.md monit/Makefile.inc
+
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
new file mode 100644
index 0000000..0f69aff
--- /dev/null
+++ b/collectors/python.d.plugin/monit/README.md
@@ -0,0 +1,35 @@
+# monit
+
+Monit monitoring module. Data is grabbed from the stats XML interface (it has existed for a long time, but is not mentioned in the official documentation). Mostly this plugin shows the statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks); a short fetch-and-parse sketch follows the list below.
+
+1. **Filesystems**
+ * Filesystems
+ * Directories
+ * Files
+ * Pipes
+
+2. **Applications**
+ * Processes (+threads/children)
+ * Programs
+
+3. **Network**
+ * Hosts (+latency)
+ * Network interfaces
+
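+As a rough sketch of what the module consumes, the status XML can be fetched
+and parsed by hand (`requests` is used here only for brevity and is not a
+dependency of the module, which relies on netdata's `UrlService`; the URL and
+credentials match the sample below):
+
+```python
+import xml.etree.ElementTree as ET
+
+import requests  # illustration only; not used by the module itself
+
+resp = requests.get('http://localhost:2812/_status?format=xml&level=full',
+                    auth=('admin', 'monit'))
+for service in ET.fromstring(resp.content).findall('./service'):
+    # the numeric 'type' attribute maps to MONIT_SERVICE_NAMES in monit.chart.py
+    print(service.get('type'), service.find('name').text)
+```
+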
+### configuration
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://localhost:2812'
+  user : 'admin'
+  pass : 'monit'
+```
+
+If no configuration is given, the module will attempt to connect to monit at `http://localhost:2812`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmonit%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
new file mode 100644
index 0000000..3ac0032
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+# Description: monit netdata python.d module
+# Author: Evgeniy K. (n0guest)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import xml.etree.ElementTree as ET
+from bases.FrameworkServices.UrlService import UrlService
+
+
+# see enum State_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
+MONIT_SERVICE_NAMES = [
+ 'Filesystem',
+ 'Directory',
+ 'File',
+ 'Process',
+ 'Host',
+ 'System',
+ 'Fifo',
+ 'Program',
+ 'Net',
+]
+
+DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8]
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'filesystem',
+ 'directory',
+ 'file',
+ 'process',
+ 'process_uptime',
+ 'process_threads',
+ 'process_children',
+ 'host',
+ 'host_latency',
+ 'system',
+ 'fifo',
+ 'program',
+ 'net'
+]
+CHARTS = {
+ 'filesystem': {
+ 'options': ['filesystems', 'Filesystems', 'filesystems', 'filesystem', 'monit.filesystems', 'line'],
+ 'lines': []
+ },
+ 'directory': {
+ 'options': ['directories', 'Directories', 'directories', 'filesystem', 'monit.directories', 'line'],
+ 'lines': []
+ },
+ 'file': {
+ 'options': ['files', 'Files', 'files', 'filesystem', 'monit.files', 'line'],
+ 'lines': []
+ },
+ 'fifo': {
+ 'options': ['fifos', 'Pipes (fifo)', 'pipes', 'filesystem', 'monit.fifos', 'line'],
+ 'lines': []
+ },
+ 'program': {
+ 'options': ['programs', 'Programs statuses', 'programs', 'applications', 'monit.programs', 'line'],
+ 'lines': []
+ },
+ 'process': {
+ 'options': ['processes', 'Processes statuses', 'processes', 'applications', 'monit.services', 'line'],
+ 'lines': []
+ },
+ 'process_uptime': {
+ 'options': ['processes uptime', 'Processes uptime', 'seconds', 'applications',
+ 'monit.process_uptime', 'line', 'hidden'],
+ 'lines': []
+ },
+ 'process_threads': {
+ 'options': ['processes threads', 'Processes threads', 'threads', 'applications',
+ 'monit.process_threads', 'line'],
+ 'lines': []
+ },
+ 'process_children': {
+        'options': ['processes children', 'Child processes', 'children', 'applications',
+                    'monit.process_children', 'line'],
+ 'lines': []
+ },
+ 'host': {
+ 'options': ['hosts', 'Hosts', 'hosts', 'network', 'monit.hosts', 'line'],
+ 'lines': []
+ },
+ 'host_latency': {
+        'options': ['hosts latency', 'Hosts latency', 'milliseconds', 'network', 'monit.host_latency', 'line'],
+ 'lines': []
+ },
+ 'net': {
+ 'options': ['interfaces', 'Network interfaces and addresses', 'interfaces', 'network',
+ 'monit.networks', 'line'],
+ 'lines': []
+ },
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ base_url = self.configuration.get('url', 'http://localhost:2812')
+ self.url = '{0}/_status?format=xml&level=full'.format(base_url)
+
+ def parse(self, data):
+ try:
+ xml = ET.fromstring(data)
+ except ET.ParseError:
+ self.error("URL {0} didn't return a vaild XML page. Please check your settings.".format(self.url))
+ return None
+ return xml
+
+ def check(self):
+ self._manager = self._build_manager()
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ return bool(self.parse(raw_data))
+
+ def _get_data(self):
+ raw_data = self._get_raw_data()
+
+ if not raw_data:
+ return None
+
+ xml = self.parse(raw_data)
+ if not xml:
+ return None
+
+ data = {}
+ for service_id in DEFAULT_SERVICES_IDS:
+ service_category = MONIT_SERVICE_NAMES[service_id].lower()
+
+ if service_category == 'system':
+ self.debug("Skipping service from 'System' category, because it's useless in graphs")
+ continue
+
+ xpath_query = "./service[@type='{0}']".format(service_id)
+ self.debug('Searching for {0} as {1}'.format(service_category, xpath_query))
+ for service_node in xml.findall(xpath_query):
+
+ service_name = service_node.find('name').text
+ service_status = service_node.find('status').text
+ service_monitoring = service_node.find('monitor').text
+ self.debug('=> found {0} with type={1}, status={2}, monitoring={3}'.format(service_name,
+ service_id, service_status, service_monitoring))
+
+ dimension_key = service_category + '_' + service_name
+ if dimension_key not in self.charts[service_category]:
+ self.charts[service_category].add_dimension([dimension_key, service_name, 'absolute'])
+ data[dimension_key] = 1 if service_status == '0' and service_monitoring == '1' else 0
+
+ if service_category == 'process':
+ for subnode in ('uptime', 'threads', 'children'):
+ subnode_value = service_node.find(subnode)
+ if subnode_value is None:
+ continue
+ if subnode == 'uptime' and int(subnode_value.text) < 0:
+                            self.debug('Skipping buggy metric with negative uptime (monit before v5.16)')
+ continue
+ dimension_key = 'process_{0}_{1}'.format(subnode, service_name)
+ if dimension_key not in self.charts['process_' + subnode]:
+ self.charts['process_' + subnode].add_dimension([dimension_key, service_name, 'absolute'])
+ data[dimension_key] = int(subnode_value.text)
+
+ if service_category == 'host':
+ subnode_value = service_node.find('./icmp/responsetime')
+ if subnode_value is None:
+ continue
+ dimension_key = 'host_latency_{0}'.format(service_name)
+ if dimension_key not in self.charts['host_latency']:
+ self.charts['host_latency'].add_dimension([dimension_key, service_name,
+ 'absolute', 1000, 1000000])
+ data[dimension_key] = float(subnode_value.text) * 1000000
+
+ return data or None
diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf
new file mode 100644
index 0000000..9a3fb69
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.conf
@@ -0,0 +1,86 @@
+# netdata python.d.plugin configuration for monit
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'URL' # the URL to fetch monit's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# Example
+#
+# local:
+# name : 'Local Monit'
+# url : 'http://localhost:2812'
+#
+# "local" will show up in Netdata logs. "Reverse Proxy" will show up in the menu
+# in the monit section.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:2812'
diff --git a/collectors/python.d.plugin/mysql/Makefile.inc b/collectors/python.d.plugin/mysql/Makefile.inc
new file mode 100644
index 0000000..03e8b65
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += mysql/mysql.chart.py
+dist_pythonconfig_DATA += mysql/mysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += mysql/README.md mysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
new file mode 100644
index 0000000..498493a
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -0,0 +1,90 @@
+# mysql
+
+Module monitors one or more MySQL servers.
+
+**Requirements:**
+ * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
+
+It will produce the following charts (if data is available):
+
+1. **Bandwidth** in kbps
+ * in
+ * out
+
+2. **Queries** in queries/sec
+ * queries
+ * questions
+ * slow queries
+
+3. **Operations** in operations/sec
+ * opened tables
+ * flush
+ * commit
+ * delete
+ * prepare
+ * read first
+ * read key
+ * read next
+ * read prev
+ * read random
+ * read random next
+ * rollback
+ * save point
+ * update
+ * write
+
+4. **Table Locks** in locks/sec
+ * immediate
+ * waited
+
+5. **Select Issues** in issues/sec
+ * full join
+ * full range join
+ * range
+ * range check
+ * scan
+
+6. **Sort Issues** in issues/sec
+ * merge passes
+ * range
+ * scan
+
+### configuration
+
+You can provide, per server, the following:
+
+1. username which has access to the database (defaults to 'root')
+2. password (defaults to none)
+3. mysql my.cnf configuration file
+4. mysql socket (optional)
+5. mysql host (ip or hostname)
+6. mysql port (defaults to 3306)
+
+Here is an example for 3 servers:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ 'my.cnf' : '/etc/mysql/my.cnf'
+ priority : 90000
+
+local_2:
+ user : 'root'
+ pass : 'blablablabla'
+ socket : '/var/run/mysqld/mysqld.sock'
+ update_every : 1
+
+remote:
+ user : 'admin'
+ pass : 'bla'
+ host : 'example.org'
+ port : 9000
+```
+
+If no configuration is given, the module will attempt to connect to the MySQL server via the unix socket at `/var/run/mysqld/mysqld.sock`, without a password and with the username `root`.
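+
+To confirm the account can run the module's main query, a short PyMySQL check
+(one of the two supported drivers; the socket and user match the defaults
+above) could look like this:
+
+```python
+import pymysql  # PyMySQL, the pure-python driver
+
+conn = pymysql.connect(unix_socket='/var/run/mysqld/mysqld.sock', user='root')
+with conn.cursor() as cursor:
+    # the same query mysql.chart.py issues on every update
+    cursor.execute('SHOW GLOBAL STATUS;')
+    print(len(cursor.fetchall()), 'status variables readable')
+conn.close()
+```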
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fmysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
new file mode 100644
index 0000000..20d32f8
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -0,0 +1,621 @@
+# -*- coding: utf-8 -*-
+# Description: MySQL netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+
+# query executed on MySQL server
+QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
+QUERY_SLAVE = 'SHOW SLAVE STATUS;'
+QUERY_VARIABLES = 'SHOW GLOBAL VARIABLES LIKE \'max_connections\';'
+
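+# only the variables listed below are picked out of the SHOW GLOBAL STATUS
+# result; anything else the server reports is ignored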
+GLOBAL_STATS = [
+ 'Bytes_received',
+ 'Bytes_sent',
+ 'Queries',
+ 'Questions',
+ 'Slow_queries',
+ 'Handler_commit',
+ 'Handler_delete',
+ 'Handler_prepare',
+ 'Handler_read_first',
+ 'Handler_read_key',
+ 'Handler_read_next',
+ 'Handler_read_prev',
+ 'Handler_read_rnd',
+ 'Handler_read_rnd_next',
+ 'Handler_rollback',
+ 'Handler_savepoint',
+ 'Handler_savepoint_rollback',
+ 'Handler_update',
+ 'Handler_write',
+ 'Table_locks_immediate',
+ 'Table_locks_waited',
+ 'Select_full_join',
+ 'Select_full_range_join',
+ 'Select_range',
+ 'Select_range_check',
+ 'Select_scan',
+ 'Sort_merge_passes',
+ 'Sort_range',
+ 'Sort_scan',
+ 'Created_tmp_disk_tables',
+ 'Created_tmp_files',
+ 'Created_tmp_tables',
+ 'Connections',
+ 'Aborted_connects',
+ 'Max_used_connections',
+ 'Binlog_cache_disk_use',
+ 'Binlog_cache_use',
+ 'Threads_connected',
+ 'Threads_created',
+ 'Threads_cached',
+ 'Threads_running',
+ 'Thread_cache_misses',
+ 'Innodb_data_read',
+ 'Innodb_data_written',
+ 'Innodb_data_reads',
+ 'Innodb_data_writes',
+ 'Innodb_data_fsyncs',
+ 'Innodb_data_pending_reads',
+ 'Innodb_data_pending_writes',
+ 'Innodb_data_pending_fsyncs',
+ 'Innodb_log_waits',
+ 'Innodb_log_write_requests',
+ 'Innodb_log_writes',
+ 'Innodb_os_log_fsyncs',
+ 'Innodb_os_log_pending_fsyncs',
+ 'Innodb_os_log_pending_writes',
+ 'Innodb_os_log_written',
+ 'Innodb_row_lock_current_waits',
+ 'Innodb_rows_inserted',
+ 'Innodb_rows_read',
+ 'Innodb_rows_updated',
+ 'Innodb_rows_deleted',
+ 'Innodb_buffer_pool_pages_data',
+ 'Innodb_buffer_pool_pages_dirty',
+ 'Innodb_buffer_pool_pages_free',
+ 'Innodb_buffer_pool_pages_flushed',
+ 'Innodb_buffer_pool_pages_misc',
+ 'Innodb_buffer_pool_pages_total',
+ 'Innodb_buffer_pool_bytes_data',
+ 'Innodb_buffer_pool_bytes_dirty',
+ 'Innodb_buffer_pool_read_ahead',
+ 'Innodb_buffer_pool_read_ahead_evicted',
+ 'Innodb_buffer_pool_read_ahead_rnd',
+ 'Innodb_buffer_pool_read_requests',
+ 'Innodb_buffer_pool_write_requests',
+ 'Innodb_buffer_pool_reads',
+ 'Innodb_buffer_pool_wait_free',
+ 'Qcache_hits',
+ 'Qcache_lowmem_prunes',
+ 'Qcache_inserts',
+ 'Qcache_not_cached',
+ 'Qcache_queries_in_cache',
+ 'Qcache_free_memory',
+ 'Qcache_free_blocks',
+ 'Qcache_total_blocks',
+ 'Key_blocks_unused',
+ 'Key_blocks_used',
+ 'Key_blocks_not_flushed',
+ 'Key_read_requests',
+ 'Key_write_requests',
+ 'Key_reads',
+ 'Key_writes',
+ 'Open_files',
+ 'Opened_files',
+ 'Binlog_stmt_cache_disk_use',
+ 'Binlog_stmt_cache_use',
+ 'Connection_errors_accept',
+ 'Connection_errors_internal',
+ 'Connection_errors_max_connections',
+ 'Connection_errors_peer_address',
+ 'Connection_errors_select',
+ 'Connection_errors_tcpwrap',
+ 'wsrep_local_recv_queue',
+ 'wsrep_local_send_queue',
+ 'wsrep_received',
+ 'wsrep_replicated',
+ 'wsrep_received_bytes',
+ 'wsrep_replicated_bytes',
+ 'wsrep_local_bf_aborts',
+ 'wsrep_local_cert_failures',
+ 'wsrep_flow_control_paused_ns',
+ 'Com_delete',
+ 'Com_insert',
+ 'Com_select',
+ 'Com_update',
+ 'Com_replace'
+]
+
+
+def slave_seconds(value):
+ try:
+ return int(value)
+ except (TypeError, ValueError):
+ return -1
+
+
+def slave_running(value):
+ return 1 if value == 'Yes' else -1
+
+
+SLAVE_STATS = [
+ ('Seconds_Behind_Master', slave_seconds),
+ ('Slave_SQL_Running', slave_running),
+ ('Slave_IO_Running', slave_running)
+]
+
+VARIABLES = [
+ 'max_connections'
+]
+
+ORDER = [
+ 'net',
+ 'queries',
+ 'queries_type',
+ 'handlers',
+ 'table_locks',
+ 'join_issues',
+ 'sort_issues',
+ 'tmp',
+ 'connections',
+ 'connections_active',
+ 'connection_errors',
+ 'binlog_cache',
+ 'binlog_stmt_cache',
+ 'threads',
+ 'threads_creation_rate',
+ 'thread_cache_misses',
+ 'innodb_io',
+ 'innodb_io_ops',
+ 'innodb_io_pending_ops',
+ 'innodb_log',
+ 'innodb_os_log',
+ 'innodb_os_log_fsync_writes',
+ 'innodb_os_log_io',
+ 'innodb_cur_row_lock',
+ 'innodb_rows',
+ 'innodb_buffer_pool_pages',
+ 'innodb_buffer_pool_flush_pages_requests',
+ 'innodb_buffer_pool_bytes',
+ 'innodb_buffer_pool_read_ahead',
+ 'innodb_buffer_pool_reqs',
+ 'innodb_buffer_pool_ops',
+ 'qcache_ops',
+ 'qcache',
+ 'qcache_freemem',
+ 'qcache_memblocks',
+ 'key_blocks',
+ 'key_requests',
+ 'key_disk_ops',
+ 'files',
+ 'files_rate',
+ 'slave_behind',
+ 'slave_status',
+ 'galera_writesets',
+ 'galera_bytes',
+ 'galera_queue',
+ 'galera_conflicts',
+ 'galera_flow_control'
+]
+
+CHARTS = {
+ 'net': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
+ 'lines': [
+ ['Bytes_received', 'in', 'incremental', 8, 1000],
+ ['Bytes_sent', 'out', 'incremental', -8, 1000]
+ ]
+ },
+ 'queries': {
+ 'options': [None, 'Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
+ 'lines': [
+ ['Queries', 'queries', 'incremental'],
+ ['Questions', 'questions', 'incremental'],
+ ['Slow_queries', 'slow_queries', 'incremental']
+ ]
+ },
+ 'queries_type': {
+ 'options': [None, 'Query Type', 'queries/s', 'query_types', 'mysql.queries_type', 'stacked'],
+ 'lines': [
+ ['Com_select', 'select', 'incremental'],
+ ['Com_delete', 'delete', 'incremental'],
+ ['Com_update', 'update', 'incremental'],
+ ['Com_insert', 'insert', 'incremental'],
+ ['Qcache_hits', 'cache_hits', 'incremental'],
+ ['Com_replace', 'replace', 'incremental']
+ ]
+ },
+ 'handlers': {
+ 'options': [None, 'Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
+ 'lines': [
+ ['Handler_commit', 'commit', 'incremental'],
+ ['Handler_delete', 'delete', 'incremental'],
+ ['Handler_prepare', 'prepare', 'incremental'],
+ ['Handler_read_first', 'read_first', 'incremental'],
+ ['Handler_read_key', 'read_key', 'incremental'],
+ ['Handler_read_next', 'read_next', 'incremental'],
+ ['Handler_read_prev', 'read_prev', 'incremental'],
+ ['Handler_read_rnd', 'read_rnd', 'incremental'],
+ ['Handler_read_rnd_next', 'read_rnd_next', 'incremental'],
+ ['Handler_rollback', 'rollback', 'incremental'],
+ ['Handler_savepoint', 'savepoint', 'incremental'],
+ ['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'],
+ ['Handler_update', 'update', 'incremental'],
+ ['Handler_write', 'write', 'incremental']
+ ]
+ },
+ 'table_locks': {
+ 'options': [None, 'Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
+ 'lines': [
+ ['Table_locks_immediate', 'immediate', 'incremental'],
+ ['Table_locks_waited', 'waited', 'incremental', -1, 1]
+ ]
+ },
+ 'join_issues': {
+ 'options': [None, 'Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
+ 'lines': [
+ ['Select_full_join', 'full_join', 'incremental'],
+ ['Select_full_range_join', 'full_range_join', 'incremental'],
+ ['Select_range', 'range', 'incremental'],
+ ['Select_range_check', 'range_check', 'incremental'],
+ ['Select_scan', 'scan', 'incremental']
+ ]
+ },
+ 'sort_issues': {
+ 'options': [None, 'Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
+ 'lines': [
+ ['Sort_merge_passes', 'merge_passes', 'incremental'],
+ ['Sort_range', 'range', 'incremental'],
+ ['Sort_scan', 'scan', 'incremental']
+ ]
+ },
+ 'tmp': {
+ 'options': [None, 'Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
+ 'lines': [
+ ['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
+ ['Created_tmp_files', 'files', 'incremental'],
+ ['Created_tmp_tables', 'tables', 'incremental']
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
+ 'lines': [
+ ['Connections', 'all', 'incremental'],
+ ['Aborted_connects', 'aborted', 'incremental']
+ ]
+ },
+ 'connections_active': {
+ 'options': [None, 'Connections Active', 'connections', 'connections', 'mysql.connections_active', 'line'],
+ 'lines': [
+ ['Threads_connected', 'active', 'absolute'],
+ ['max_connections', 'limit', 'absolute'],
+ ['Max_used_connections', 'max_active', 'absolute']
+ ]
+ },
+ 'binlog_cache': {
+ 'options': [None, 'Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
+ 'lines': [
+ ['Binlog_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_cache_use', 'all', 'incremental']
+ ]
+ },
+ 'threads': {
+ 'options': [None, 'Threads', 'threads', 'threads', 'mysql.threads', 'line'],
+ 'lines': [
+ ['Threads_connected', 'connected', 'absolute'],
+ ['Threads_cached', 'cached', 'absolute', -1, 1],
+ ['Threads_running', 'running', 'absolute'],
+ ]
+ },
+ 'threads_creation_rate': {
+        'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads_creation_rate', 'line'],
+ 'lines': [
+ ['Threads_created', 'created', 'incremental'],
+ ]
+ },
+ 'thread_cache_misses': {
+ 'options': [None, 'Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
+ 'lines': [
+ ['Thread_cache_misses', 'misses', 'absolute', 1, 100]
+ ]
+ },
+ 'innodb_io': {
+ 'options': [None, 'InnoDB I/O Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_io', 'area'],
+ 'lines': [
+ ['Innodb_data_read', 'read', 'incremental', 1, 1024],
+ ['Innodb_data_written', 'write', 'incremental', -1, 1024]
+ ]
+ },
+ 'innodb_io_ops': {
+ 'options': [None, 'InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
+ 'lines': [
+ ['Innodb_data_reads', 'reads', 'incremental'],
+ ['Innodb_data_writes', 'writes', 'incremental', -1, 1],
+ ['Innodb_data_fsyncs', 'fsyncs', 'incremental']
+ ]
+ },
+ 'innodb_io_pending_ops': {
+ 'options': [None, 'InnoDB Pending I/O Operations', 'operations', 'innodb',
+ 'mysql.innodb_io_pending_ops', 'line'],
+ 'lines': [
+ ['Innodb_data_pending_reads', 'reads', 'absolute'],
+ ['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1],
+ ['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute']
+ ]
+ },
+ 'innodb_log': {
+ 'options': [None, 'InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
+ 'lines': [
+ ['Innodb_log_waits', 'waits', 'incremental'],
+ ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
+ ['Innodb_log_writes', 'writes', 'incremental', -1, 1],
+ ]
+ },
+ 'innodb_os_log': {
+ 'options': [None, 'InnoDB OS Log Pending Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
+ 'lines': [
+ ['Innodb_os_log_pending_fsyncs', 'fsyncs', 'absolute'],
+ ['Innodb_os_log_pending_writes', 'writes', 'absolute', -1, 1],
+ ]
+ },
+ 'innodb_os_log_fsync_writes': {
+ 'options': [None, 'InnoDB OS Log Operations', 'operations/s', 'innodb', 'mysql.innodb_os_log_fsync_writes', 'line'],
+ 'lines': [
+ ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
+ ]
+ },
+ 'innodb_os_log_io': {
+ 'options': [None, 'InnoDB OS Log Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
+ 'lines': [
+ ['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
+ ]
+ },
+ 'innodb_cur_row_lock': {
+ 'options': [None, 'InnoDB Current Row Locks', 'operations', 'innodb',
+ 'mysql.innodb_cur_row_lock', 'area'],
+ 'lines': [
+ ['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
+ ]
+ },
+ 'innodb_rows': {
+ 'options': [None, 'InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
+ 'lines': [
+ ['Innodb_rows_inserted', 'inserted', 'incremental'],
+ ['Innodb_rows_read', 'read', 'incremental', 1, 1],
+ ['Innodb_rows_updated', 'updated', 'incremental', 1, 1],
+ ['Innodb_rows_deleted', 'deleted', 'incremental', -1, 1],
+ ]
+ },
+ 'innodb_buffer_pool_pages': {
+ 'options': [None, 'InnoDB Buffer Pool Pages', 'pages', 'innodb',
+ 'mysql.innodb_buffer_pool_pages', 'line'],
+ 'lines': [
+ ['Innodb_buffer_pool_pages_data', 'data', 'absolute'],
+ ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_free', 'free', 'absolute'],
+ ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_total', 'total', 'absolute']
+ ]
+ },
+ 'innodb_buffer_pool_flush_pages_requests': {
+ 'options': [None, 'InnoDB Buffer Pool Flush Pages Requests', 'requests/s', 'innodb',
+ 'mysql.innodb_buffer_pool_flush_pages_requests', 'line'],
+ 'lines': [
+ ['Innodb_buffer_pool_pages_flushed', 'flush pages', 'incremental'],
+ ]
+ },
+ 'innodb_buffer_pool_bytes': {
+ 'options': [None, 'InnoDB Buffer Pool Bytes', 'MiB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
+ ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
+ ]
+ },
+ 'innodb_buffer_pool_read_ahead': {
+ 'options': [None, 'InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb',
+ 'mysql.innodb_buffer_pool_read_ahead', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_read_ahead', 'all', 'incremental'],
+ ['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1],
+ ['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental']
+ ]
+ },
+ 'innodb_buffer_pool_reqs': {
+ 'options': [None, 'InnoDB Buffer Pool Requests', 'requests/s', 'innodb',
+ 'mysql.innodb_buffer_pool_reqs', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
+ ['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1]
+ ]
+ },
+ 'innodb_buffer_pool_ops': {
+ 'options': [None, 'InnoDB Buffer Pool Operations', 'operations/s', 'innodb',
+ 'mysql.innodb_buffer_pool_ops', 'area'],
+ 'lines': [
+ ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
+ ['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1]
+ ]
+ },
+ 'qcache_ops': {
+ 'options': [None, 'QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
+ 'lines': [
+ ['Qcache_hits', 'hits', 'incremental'],
+ ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
+ ['Qcache_inserts', 'inserts', 'incremental'],
+ ['Qcache_not_cached', 'not cached', 'incremental', -1, 1]
+ ]
+ },
+ 'qcache': {
+ 'options': [None, 'QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
+ 'lines': [
+ ['Qcache_queries_in_cache', 'queries', 'absolute']
+ ]
+ },
+ 'qcache_freemem': {
+ 'options': [None, 'QCache Free Memory', 'MiB', 'qcache', 'mysql.qcache_freemem', 'area'],
+ 'lines': [
+ ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'qcache_memblocks': {
+ 'options': [None, 'QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
+ 'lines': [
+ ['Qcache_free_blocks', 'free', 'absolute'],
+ ['Qcache_total_blocks', 'total', 'absolute']
+ ]
+ },
+ 'key_blocks': {
+ 'options': [None, 'MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
+ 'lines': [
+ ['Key_blocks_unused', 'unused', 'absolute'],
+ ['Key_blocks_used', 'used', 'absolute', -1, 1],
+ ['Key_blocks_not_flushed', 'not flushed', 'absolute']
+ ]
+ },
+ 'key_requests': {
+ 'options': [None, 'MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
+ 'lines': [
+ ['Key_read_requests', 'reads', 'incremental'],
+ ['Key_write_requests', 'writes', 'incremental', -1, 1]
+ ]
+ },
+ 'key_disk_ops': {
+ 'options': [None, 'MyISAM Key Cache Disk Operations', 'operations/s',
+ 'myisam', 'mysql.key_disk_ops', 'area'],
+ 'lines': [
+ ['Key_reads', 'reads', 'incremental'],
+ ['Key_writes', 'writes', 'incremental', -1, 1]
+ ]
+ },
+ 'files': {
+ 'options': [None, 'Open Files', 'files', 'files', 'mysql.files', 'line'],
+ 'lines': [
+ ['Open_files', 'files', 'absolute']
+ ]
+ },
+ 'files_rate': {
+ 'options': [None, 'Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
+ 'lines': [
+ ['Opened_files', 'files', 'incremental']
+ ]
+ },
+ 'binlog_stmt_cache': {
+ 'options': [None, 'Binlog Statement Cache', 'statements/s', 'binlog',
+ 'mysql.binlog_stmt_cache', 'line'],
+ 'lines': [
+ ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_stmt_cache_use', 'all', 'incremental']
+ ]
+ },
+ 'connection_errors': {
+ 'options': [None, 'Connection Errors', 'connections/s', 'connections',
+ 'mysql.connection_errors', 'line'],
+ 'lines': [
+ ['Connection_errors_accept', 'accept', 'incremental'],
+ ['Connection_errors_internal', 'internal', 'incremental'],
+ ['Connection_errors_max_connections', 'max', 'incremental'],
+ ['Connection_errors_peer_address', 'peer_addr', 'incremental'],
+ ['Connection_errors_select', 'select', 'incremental'],
+ ['Connection_errors_tcpwrap', 'tcpwrap', 'incremental']
+ ]
+ },
+ 'slave_behind': {
+ 'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
+ 'lines': [
+ ['Seconds_Behind_Master', 'seconds', 'absolute']
+ ]
+ },
+ 'slave_status': {
+ 'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
+ 'lines': [
+ ['Slave_SQL_Running', 'sql_running', 'absolute'],
+ ['Slave_IO_Running', 'io_running', 'absolute']
+ ]
+ },
+ 'galera_writesets': {
+ 'options': [None, 'Replicated Writesets', 'writesets/s', 'galera', 'mysql.galera_writesets', 'line'],
+ 'lines': [
+ ['wsrep_received', 'rx', 'incremental'],
+ ['wsrep_replicated', 'tx', 'incremental', -1, 1],
+ ]
+ },
+ 'galera_bytes': {
+ 'options': [None, 'Replicated Bytes', 'KiB/s', 'galera', 'mysql.galera_bytes', 'area'],
+ 'lines': [
+ ['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
+ ['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
+ ]
+ },
+ 'galera_queue': {
+ 'options': [None, 'Galera Queue', 'writesets', 'galera', 'mysql.galera_queue', 'line'],
+ 'lines': [
+ ['wsrep_local_recv_queue', 'rx', 'absolute'],
+ ['wsrep_local_send_queue', 'tx', 'absolute', -1, 1],
+ ]
+ },
+ 'galera_conflicts': {
+ 'options': [None, 'Replication Conflicts', 'transactions/s', 'galera', 'mysql.galera_conflicts', 'area'],
+ 'lines': [
+ ['wsrep_local_bf_aborts', 'bf_aborts', 'incremental'],
+ ['wsrep_local_cert_failures', 'cert_fails', 'incremental', -1, 1],
+ ]
+ },
+ 'galera_flow_control': {
+ 'options': [None, 'Flow Control', 'milliseconds', 'galera', 'mysql.galera_flow_control', 'area'],
+ 'lines': [
+ ['wsrep_flow_control_paused_ns', 'paused', 'incremental', 1, 1000000],
+ ]
+ }
+}
+
+
+class Service(MySQLService):
+ def __init__(self, configuration=None, name=None):
+ MySQLService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.queries = dict(
+ global_status=QUERY_GLOBAL,
+ slave_status=QUERY_SLAVE,
+ variables=QUERY_VARIABLES,
+ )
+
+ def _get_data(self):
+
+ raw_data = self._get_raw_data(description=True)
+
+ if not raw_data:
+ return None
+
+ to_netdata = dict()
+
+ if 'global_status' in raw_data:
+ global_status = dict(raw_data['global_status'][0])
+ for key in GLOBAL_STATS:
+ if key in global_status:
+ to_netdata[key] = global_status[key]
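+ # miss ratio scaled by 10000; the 'thread_cache_misses' chart divides
+ # by 100, rendering a percentage with two decimal places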
+ if 'Threads_created' in to_netdata and 'Connections' in to_netdata:
+ to_netdata['Thread_cache_misses'] = round(int(to_netdata['Threads_created'])
+ / float(to_netdata['Connections']) * 10000)
+
+ if 'slave_status' in raw_data:
+ if raw_data['slave_status'][0]:
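+ # slave_status raw data is (rows, description); zip the column names
+ # from the cursor description with the values of the single result row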
+ slave_raw_data = dict(zip([e[0] for e in raw_data['slave_status'][1]], raw_data['slave_status'][0][0]))
+ for key, func in SLAVE_STATS:
+ if key in slave_raw_data:
+ to_netdata[key] = func(slave_raw_data[key])
+ else:
+ self.queries.pop('slave_status')
+
+ if 'variables' in raw_data:
+ variables = dict(raw_data['variables'][0])
+ for key in VARIABLES:
+ if key in variables:
+ to_netdata[key] = variables[key]
+
+ return to_netdata or None
diff --git a/collectors/python.d.plugin/mysql/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf
new file mode 100644
index 0000000..ac9b505
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/mysql.conf
@@ -0,0 +1,285 @@
+# netdata python.d.plugin configuration for mysql
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, mysql also supports the following:
+#
+# socket: 'path/to/mysql.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the mysql username to use
+# pass: 'password' # the mysql password to use
+#
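+# Example (the host and password below are placeholders - substitute
+# your own values):
+#
+# remote:
+#   name : 'remote'
+#   host : 'db.example.com'
+#   port : '3306'
+#   user : 'netdata'
+#   pass : 'netdata-password'
+#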
+
+# ----------------------------------------------------------------------
+# MySQL CONFIGURATION
+#
+# netdata does not need any privileges - only the ability to connect
+# to the mysql server (it will not be able to see or alter any data).
+#
+# Execute these commands to give the local user 'netdata' the ability
+# to connect to the mysql server on localhost, without a password:
+#
+# > create user 'netdata'@'localhost';
+# > grant usage on *.* to 'netdata'@'localhost';
+# > flush privileges;
+#
+# with the above statements, netdata will be able to gather mysql
+# statistics, without the ability to see or alter any data or affect
+# mysql operation in any way. No change is required below.
+#
+# If you need to monitor mysql replication too, use this instead:
+#
+# > create user 'netdata'@'localhost';
+# > grant replication client on *.* to 'netdata'@'localhost';
+# > flush privileges;
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+mycnf1:
+ name : 'local'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2:
+ name : 'local'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+debiancnf:
+ name : 'local'
+ 'my.cnf' : '/etc/mysql/debian.cnf'
+
+socket1:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/run/mysqld/mysql.sock'
+
+socket3:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket4:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+ # keep in mind port might be ignored by mysql, if host = 'localhost'
+ # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
+
+tcpipv4:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
+
+# Now we try the same as above with user: root
+# A few systems configure mysql to accept passwordless
+# root access.
+
+mycnf1_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+socket1_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/run/mysqld/mysql.sock'
+
+socket3_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket4_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+ # keep in mind port might be ignored by mysql, if host = 'localhost'
+ # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
+
+tcpipv4_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
+
+# Now we try the same as above with user: netdata
+
+mycnf1_netdata:
+ name : 'local'
+ user : 'netdata'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2_netdata:
+ name : 'local'
+ user : 'netdata'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
+socket1_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/var/run/mysqld/mysqld.sock'
+
+socket2_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/var/run/mysqld/mysql.sock'
+
+socket3_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/var/lib/mysql/mysql.sock'
+
+socket4_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
+tcp_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ host : 'localhost'
+ port : '3306'
+ # keep in mind port might be ignored by mysql, if host = 'localhost'
+ # http://serverfault.com/questions/337818/how-to-force-mysql-to-connect-by-tcp-instead-of-a-unix-socket/337844#337844
+
+tcpipv4_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6_netdata:
+ name : 'local'
+ user : 'netdata'
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
diff --git a/collectors/python.d.plugin/nginx/Makefile.inc b/collectors/python.d.plugin/nginx/Makefile.inc
new file mode 100644
index 0000000..4636aa8
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nginx/nginx.chart.py
+dist_pythonconfig_DATA += nginx/nginx.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx/README.md nginx/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
new file mode 100644
index 0000000..7854105
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -0,0 +1,46 @@
+# nginx
+
+This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
+
+**Requirements:**
+ * nginx compiled with `ngx_http_stub_status_module`
+ * a `location /stub_status` block exposing the status page (see the example below)
+
+Example nginx configuration can be found in 'python.d/nginx.conf'
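+
+A minimal `stub_status` location block, matching the example shipped in
+'python.d/nginx.conf' (the allowed IP address is illustrative - adjust it
+to your monitoring host):
+
+```
+location /stub_status {
+    stub_status;
+    # Security: Only allow access from the IP below.
+    allow 192.168.1.200;
+    # Deny anyone else
+    deny all;
+}
+```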
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Active Connections by Status**
+ * reading
+ * writing
+ * waiting
+
+4. **Connections Rate** in connections/s
+ * accepts
+ * handled
+
+### configuration
+
+Needs only the `url` of the server's `stub_status` page.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/stub_status'
+```
+
+Without configuration, the module attempts to connect to `http://localhost/stub_status`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nginx/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
new file mode 100644
index 0000000..84a5985
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Description: nginx netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'connections',
+ 'requests',
+ 'connection_status',
+ 'connect_rate',
+]
+
+CHARTS = {
+ 'connections': {
+ 'options': [None, 'Active Connections', 'connections', 'active connections',
+ 'nginx.connections', 'line'],
+ 'lines': [
+ ['active']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'incremental']
+ ]
+ },
+ 'connection_status': {
+ 'options': [None, 'Active Connections by Status', 'connections', 'status',
+ 'nginx.connection_status', 'line'],
+ 'lines': [
+ ['reading'],
+ ['writing'],
+ ['waiting', 'idle']
+ ]
+ },
+ 'connect_rate': {
+ 'options': [None, 'Connections Rate', 'connections/s', 'connections rate',
+ 'nginx.connect_rate', 'line'],
+ 'lines': [
+ ['accepts', 'accepted', 'incremental'],
+ ['handled', None, 'incremental']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://localhost/stub_status')
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ try:
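+ # stub_status responds with plain text like:
+ # Active connections: 291
+ # server accepts handled requests
+ # 16630948 16630948 31070465
+ # Reading: 6 Writing: 179 Waiting: 106
+ # after splitting on spaces, the fields used below sit at fixed indices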
+ raw = self._get_raw_data().split(" ")
+ return {'active': int(raw[2]),
+ 'requests': int(raw[9]),
+ 'reading': int(raw[11]),
+ 'writing': int(raw[13]),
+ 'waiting': int(raw[15]),
+ 'accepts': int(raw[7]),
+ 'handled': int(raw[8])}
+ except (ValueError, AttributeError):
+ return None
diff --git a/collectors/python.d.plugin/nginx/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf
new file mode 100644
index 0000000..4001b4b
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/nginx.conf
@@ -0,0 +1,107 @@
+# netdata python.d.plugin configuration for nginx
+#
+# You must have ngx_http_stub_status_module configured on your nginx server for this
+# plugin to work. The following is an example config.
+# It must be located inside a server { } block.
+#
+# location /stub_status {
+# stub_status;
+# # Security: Only allow access from the IP below.
+# allow 192.168.1.200;
+# # Deny anyone else
+# deny all;
+# }
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'URL' # the URL to fetch nginx's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# Example
+#
+# RemoteNginx:
+# name : 'Reverse_Proxy'
+# url : 'http://yourdomain.com/stub_status'
+#
+# "RemoteNginx" will show up in Netdata logs. "Reverse Proxy" will show up in the menu
+# in the nginx section.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/stub_status'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/stub_status'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/stub_status'
+
diff --git a/collectors/python.d.plugin/nginx_plus/Makefile.inc b/collectors/python.d.plugin/nginx_plus/Makefile.inc
new file mode 100644
index 0000000..d3fdeaf
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nginx_plus/nginx_plus.chart.py
+dist_pythonconfig_DATA += nginx_plus/nginx_plus.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx_plus/README.md nginx_plus/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
new file mode 100644
index 0000000..c20ce30
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -0,0 +1,127 @@
+# nginx_plus
+
+This module will monitor one or more nginx_plus servers depending on configuration.
+Servers can be either local or remote.
+
+Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
+
+It produces the following charts:
+
+1. **Requests total** in requests/s
+ * total
+
+2. **Requests current** in requests
+ * current
+
+3. **Connection Statistics** in connections/s
+ * accepted
+ * dropped
+
+4. **Workers Statistics** in workers
+ * idle
+ * active
+
+5. **SSL Handshakes** in handshakes/s
+ * successful
+ * failed
+
+6. **SSL Session Reuses** in sessions/s
+ * reused
+
+7. **SSL Memory Usage** in percent
+ * usage
+
+8. **Processes** in processes
+ * respawned
+
+For every server zone:
+
+1. **Processing** in requests
+ * processing
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Responses** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Traffic** in kilobits/s
+ * received
+ * sent
+
+For every upstream:
+
+1. **Peers Requests** in requests/s
+ * peer name (dimension per peer)
+
+2. **All Peers Responses** in responses/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+3. **Peer Responses** in responses/s (for every peer)
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Peers Connections** in active
+ * peer name (dimension per peer)
+
+5. **Peers Connections Usage** in percent
+ * peer name (dimension per peer)
+
+6. **All Peers Traffic** in kilobits/s
+ * received
+ * sent
+
+7. **Peer Traffic** in kilobits/s (for every peer)
+ * received
+ * sent
+
+8. **Peer Timings** in ms (for every peer)
+ * header
+ * response
+
+9. **Memory Usage** in percent
+ * usage
+
+10. **Peers Status** in state
+ * peer name (dimension per peer)
+
+11. **Peers Total Downtime** in seconds
+ * peer name (dimension per peer)
+
+For every cache:
+
+1. **Traffic** in KiB
+ * served
+ * written
+ * bypass
+
+2. **Memory Usage** in percent
+ * usage
+
+### configuration
+
+Needs only the `url` of the server's `status` page.
+
+Here is an example for a local server:
+
+```yaml
+local:
+ url : 'http://localhost/status'
+```
+
+Without configuration, the module fails to start.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx_plus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
new file mode 100644
index 0000000..3082fdb
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -0,0 +1,488 @@
+# -*- coding: utf-8 -*-
+# Description: nginx_plus netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from collections import defaultdict
+from copy import deepcopy
+from json import loads
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'requests_total',
+ 'requests_current',
+ 'connections_statistics',
+ 'connections_workers',
+ 'ssl_handshakes',
+ 'ssl_session_reuses',
+ 'ssl_memory_usage',
+ 'processes'
+]
+
+CHARTS = {
+ 'requests_total': {
+ 'options': [None, 'Requests Total', 'requests/s', 'requests', 'nginx_plus.requests_total', 'line'],
+ 'lines': [
+ ['requests_total', 'total', 'incremental']
+ ]
+ },
+ 'requests_current': {
+ 'options': [None, 'Requests Current', 'requests', 'requests', 'nginx_plus.requests_current', 'line'],
+ 'lines': [
+ ['requests_current', 'current']
+ ]
+ },
+ 'connections_statistics': {
+ 'options': [None, 'Connections Statistics', 'connections/s',
+ 'connections', 'nginx_plus.connections_statistics', 'stacked'],
+ 'lines': [
+ ['connections_accepted', 'accepted', 'incremental'],
+ ['connections_dropped', 'dropped', 'incremental']
+ ]
+ },
+ 'connections_workers': {
+ 'options': [None, 'Workers Statistics', 'workers',
+ 'connections', 'nginx_plus.connections_workers', 'stacked'],
+ 'lines': [
+ ['connections_idle', 'idle'],
+ ['connections_active', 'active']
+ ]
+ },
+ 'ssl_handshakes': {
+ 'options': [None, 'SSL Handshakes', 'handshakes/s', 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
+ 'lines': [
+ ['ssl_handshakes', 'successful', 'incremental'],
+ ['ssl_handshakes_failed', 'failed', 'incremental']
+ ]
+ },
+ 'ssl_session_reuses': {
+ 'options': [None, 'Session Reuses', 'sessions/s', 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
+ 'lines': [
+ ['ssl_session_reuses', 'reused', 'incremental']
+ ]
+ },
+ 'ssl_memory_usage': {
+ 'options': [None, 'Memory Usage', 'percentage', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
+ 'lines': [
+ ['ssl_memory_usage', 'usage', 'absolute', 1, 100]
+ ]
+ },
+ 'processes': {
+ 'options': [None, 'Processes', 'processes', 'processes', 'nginx_plus.processes', 'line'],
+ 'lines': [
+ ['processes_respawned', 'respawned']
+ ]
+ }
+}
+
+
+def cache_charts(cache):
+ family = 'cache {0}'.format(cache.real_name)
+ charts = OrderedDict()
+
+ charts['{0}_traffic'.format(cache.name)] = {
+ 'options': [None, 'Traffic', 'KiB', family, 'nginx_plus.cache_traffic', 'stacked'],
+ 'lines': [
+ ['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
+ ['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
+ ['_'.join([cache.name, 'miss_bytes']), 'bypass', 'absolute', 1, 1024]
+ ]
+ }
+ charts['{0}_memory_usage'.format(cache.name)] = {
+ 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.cache_memory_usage', 'area'],
+ 'lines': [
+ ['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
+ ]
+ }
+ return charts
+
+
+def web_zone_charts(wz):
+ charts = OrderedDict()
+ family = 'web zone {name}'.format(name=wz.real_name)
+
+ # Processing
+ charts['zone_{name}_processing'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Processing'.format(name=wz.name), 'requests', family,
+ 'nginx_plus.web_zone_processing', 'line'],
+ 'lines': [
+ ['_'.join([wz.name, 'processing']), 'processing']
+ ]
+ }
+ # Requests
+ charts['zone_{name}_requests'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Requests'.format(name=wz.name), 'requests/s', family,
+ 'nginx_plus.web_zone_requests', 'line'],
+ 'lines': [
+ ['_'.join([wz.name, 'requests']), 'requests', 'incremental']
+ ]
+ }
+ # Response Codes
+ charts['zone_{name}_responses'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Responses'.format(name=wz.name), 'requests/s', family,
+ 'nginx_plus.web_zone_responses', 'stacked'],
+ 'lines': [
+ ['_'.join([wz.name, 'responses_2xx']), '2xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_5xx']), '5xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_3xx']), '3xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_4xx']), '4xx', 'incremental'],
+ ['_'.join([wz.name, 'responses_1xx']), '1xx', 'incremental']
+ ]
+ }
+ # Traffic
+ charts['zone_{name}_net'.format(name=wz.name)] = {
+ 'options': [None, 'Zone "{name}" Traffic'.format(name=wz.name), 'kilobits/s', family,
+ 'nginx_plus.zone_net', 'area'],
+ 'lines': [
+ ['_'.join([wz.name, 'received']), 'received', 'incremental', 1, 1000],
+ ['_'.join([wz.name, 'sent']), 'sent', 'incremental', -1, 1000]
+ ]
+ }
+ return charts
+
+
+def web_upstream_charts(wu):
+ def dimensions(value, a='absolute', m=1, d=1):
+ dims = list()
+ for p in wu:
+ dims.append(['_'.join([wu.name, p.server, value]), p.real_server, a, m, d])
+ return dims
+
+ charts = OrderedDict()
+ family = 'web upstream {name}'.format(name=wu.real_name)
+
+ # Requests
+ charts['web_upstream_{name}_requests'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Requests', 'requests/s', family, 'nginx_plus.web_upstream_requests', 'line'],
+ 'lines': dimensions('requests', 'incremental')
+ }
+ # Responses Codes
+ charts['web_upstream_{name}_all_responses'.format(name=wu.name)] = {
+ 'options': [None, 'All Peers Responses', 'responses/s', family,
+ 'nginx_plus.web_upstream_all_responses', 'stacked'],
+ 'lines': [
+ ['_'.join([wu.name, 'responses_2xx']), '2xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_5xx']), '5xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_3xx']), '3xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_4xx']), '4xx', 'incremental'],
+ ['_'.join([wu.name, 'responses_1xx']), '1xx', 'incremental'],
+ ]
+ }
+ for peer in wu:
+ charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.server)] = {
+ 'options': [None, 'Peer "{0}" Responses'.format(peer.real_server), 'responses/s', family,
+ 'nginx_plus.web_upstream_peer_responses', 'stacked'],
+ 'lines': [
+ ['_'.join([wu.name, peer.server, 'responses_2xx']), '2xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_5xx']), '5xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_3xx']), '3xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_4xx']), '4xx', 'incremental'],
+ ['_'.join([wu.name, peer.server, 'responses_1xx']), '1xx', 'incremental']
+ ]
+ }
+ # Connections
+ charts['web_upstream_{name}_connections'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Connections', 'active', family, 'nginx_plus.web_upstream_connections', 'line'],
+ 'lines': dimensions('active')
+ }
+ charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Connections Usage', 'percentage', family,
+ 'nginx_plus.web_upstream_connections_usage', 'line'],
+ 'lines': dimensions('connections_usage', d=100)
+ }
+ # Traffic
+ charts['web_upstream_{0}_all_net'.format(wu.name)] = {
+ 'options': [None, 'All Peers Traffic', 'kilobits/s', family, 'nginx_plus.web_upstream_all_net', 'area'],
+ 'lines': [
+ ['{0}_received'.format(wu.name), 'received', 'incremental', 1, 1000],
+ ['{0}_sent'.format(wu.name), 'sent', 'incremental', -1, 1000]
+ ]
+ }
+ for peer in wu:
+ charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.server)] = {
+ 'options': [None, 'Peer "{0}" Traffic'.format(peer.real_server), 'kilobits/s', family,
+ 'nginx_plus.web_upstream_peer_traffic', 'area'],
+ 'lines': [
+ ['{0}_{1}_received'.format(wu.name, peer.server), 'received', 'incremental', 1, 1000],
+ ['{0}_{1}_sent'.format(wu.name, peer.server), 'sent', 'incremental', -1, 1000]
+ ]
+ }
+ # Response Time
+ for peer in wu:
+ charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
+ 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'milliseconds', family,
+ 'nginx_plus.web_upstream_peer_timings', 'line'],
+ 'lines': [
+ ['_'.join([wu.name, peer.server, 'header_time']), 'header'],
+ ['_'.join([wu.name, peer.server, 'response_time']), 'response']
+ ]
+ }
+ # Memory Usage
+ charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
+ 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
+ 'lines': [
+ ['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
+ ]
+ }
+ # State
+ charts['web_upstream_{name}_status'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Status', 'state', family, 'nginx_plus.web_upstream_status', 'line'],
+ 'lines': dimensions('state')
+ }
+ # Downtime
+ charts['web_upstream_{name}_downtime'.format(name=wu.name)] = {
+ 'options': [None, 'Peers Downtime', 'seconds', family, 'nginx_plus.web_upstream_peer_downtime', 'line'],
+ 'lines': dimensions('downtime', d=1000)
+ }
+
+ return charts
+
+
+METRICS = {
+ 'SERVER': [
+ 'processes.respawned',
+ 'connections.accepted',
+ 'connections.dropped',
+ 'connections.active',
+ 'connections.idle',
+ 'ssl.handshakes',
+ 'ssl.handshakes_failed',
+ 'ssl.session_reuses',
+ 'requests.total',
+ 'requests.current',
+ 'slabs.SSL.pages.free',
+ 'slabs.SSL.pages.used'
+ ],
+ 'WEB_ZONE': [
+ 'processing',
+ 'requests',
+ 'responses.1xx',
+ 'responses.2xx',
+ 'responses.3xx',
+ 'responses.4xx',
+ 'responses.5xx',
+ 'discarded',
+ 'received',
+ 'sent'
+ ],
+ 'WEB_UPSTREAM_PEER': [
+ 'id',
+ 'server',
+ 'name',
+ 'state',
+ 'active',
+ 'max_conns',
+ 'requests',
+ 'header_time', # alive only
+ 'response_time', # alive only
+ 'responses.1xx',
+ 'responses.2xx',
+ 'responses.3xx',
+ 'responses.4xx',
+ 'responses.5xx',
+ 'sent',
+ 'received',
+ 'downtime'
+ ],
+ 'WEB_UPSTREAM_SUMMARY': [
+ 'responses.1xx',
+ 'responses.2xx',
+ 'responses.3xx',
+ 'responses.4xx',
+ 'responses.5xx',
+ 'sent',
+ 'received'
+ ],
+ 'CACHE': [
+ 'hit.bytes', # served
+ 'miss.bytes_written', # written
+ 'miss.bytes' # bypass
+ ]
+}
+
+BAD_SYMBOLS = re.compile(r'[:/.-]+')
+
+
+class Cache:
+ key = 'caches'
+ charts = cache_charts
+
+ def __init__(self, **kw):
+ self.real_name = kw['name']
+ self.name = BAD_SYMBOLS.sub('_', self.real_name)
+
+ def memory_usage(self, data):
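+ # share of used slab pages, scaled by 100 (the matching chart uses a
+ # divisor of 100 to render it as a percentage)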
+ used = data['slabs'][self.real_name]['pages']['used']
+ free = data['slabs'][self.real_name]['pages']['free']
+ return used / float(free + used) * 1e4
+
+ def get_data(self, raw_data):
+ zone_data = raw_data['caches'][self.real_name]
+ data = parse_json(zone_data, METRICS['CACHE'])
+ data['memory_usage'] = self.memory_usage(raw_data)
+ return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
+class WebZone:
+ key = 'server_zones'
+ charts = web_zone_charts
+
+ def __init__(self, **kw):
+ self.real_name = kw['name']
+ self.name = BAD_SYMBOLS.sub('_', self.real_name)
+
+ def get_data(self, raw_data):
+ zone_data = raw_data['server_zones'][self.real_name]
+ data = parse_json(zone_data, METRICS['WEB_ZONE'])
+ return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
+class WebUpstream:
+ key = 'upstreams'
+ charts = web_upstream_charts
+
+ def __init__(self, **kw):
+ self.real_name = kw['name']
+ self.name = BAD_SYMBOLS.sub('_', self.real_name)
+ self.peers = OrderedDict()
+
+ peers = kw['response']['upstreams'][self.real_name]['peers']
+ for peer in peers:
+ self.add_peer(peer['id'], peer['server'])
+
+ def __iter__(self):
+ return iter(self.peers.values())
+
+ def add_peer(self, idx, server):
+ peer = WebUpstreamPeer(idx, server)
+ self.peers[peer.real_server] = peer
+ return peer
+
+ def peers_stats(self, peers):
+ peers = {int(peer['id']): peer for peer in peers}
+ data = dict()
+ for peer in self.peers.values():
+ if not peer.active:
+ continue
+ try:
+ data.update(peer.get_data(peers[peer.id]))
+ except KeyError:
+ peer.active = False
+ return data
+
+ def memory_usage(self, data):
+ used = data['slabs'][self.real_name]['pages']['used']
+ free = data['slabs'][self.real_name]['pages']['free']
+ return used / float(free + used) * 1e4
+
+ def summary_stats(self, data):
+ rv = defaultdict(int)
+ for metric in METRICS['WEB_UPSTREAM_SUMMARY']:
+ for peer in self.peers.values():
+ if peer.active:
+ metric = '_'.join(metric.split('.'))
+ rv[metric] += data['_'.join([peer.server, metric])]
+ return rv
+
+ def get_data(self, raw_data):
+ data = dict()
+ peers = raw_data['upstreams'][self.real_name]['peers']
+ data.update(self.peers_stats(peers))
+ data.update(self.summary_stats(data))
+ data['memory_usage'] = self.memory_usage(raw_data)
+ return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
+class WebUpstreamPeer:
+ def __init__(self, idx, server):
+ self.id = idx
+ self.real_server = server
+ self.server = BAD_SYMBOLS.sub('_', self.real_server)
+ self.active = True
+
+ def get_data(self, raw):
+ data = dict(header_time=0, response_time=0, max_conns=0)
+ data.update(parse_json(raw, METRICS['WEB_UPSTREAM_PEER']))
+ data['connections_usage'] = 0 if not data['max_conns'] else data['active'] / float(data['max_conns']) * 1e4
+ data['state'] = int(data['state'] == 'up')
+ return dict(('_'.join([self.server, k]), v) for k, v in data.items())
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = deepcopy(CHARTS)
+ self.objects = dict()
+
+ def check(self):
+ if not self.url:
+ self.error('URL is not defined')
+ return None
+
+ self._manager = self._build_manager()
+ if not self._manager:
+ return None
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ response = loads(raw_data)
+ except ValueError:
+ return None
+
+ for obj_cls in [WebZone, WebUpstream, Cache]:
+ for obj_name in response.get(obj_cls.key, list()):
+ obj = obj_cls(name=obj_name, response=response)
+ self.objects[obj.real_name] = obj
+ charts = obj_cls.charts(obj)
+ for chart in charts:
+ self.order.append(chart)
+ self.definitions[chart] = charts[chart]
+
+ return bool(self.objects)
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ response = loads(raw_data)
+
+ data = parse_json(response, METRICS['SERVER'])
+ pages_used = data['slabs_SSL_pages_used']
+ pages_free = data['slabs_SSL_pages_free']
+ # usage as a share of total pages, consistent with the other memory_usage
+ # calculations above (scaled by 100; the chart divides by 100)
+ data['ssl_memory_usage'] = pages_used / float(pages_used + pages_free) * 1e4
+
+ for obj in self.objects.values():
+ if obj.real_name in response[obj.key]:
+ data.update(obj.get_data(response))
+
+ return data
+
+
+def parse_json(raw_data, metrics):
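+ # Resolve each dotted metric path (e.g. 'responses.2xx') against the
+ # nested JSON response, flattening the path into an underscore-joined key.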
+ data = dict()
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+ return data
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
new file mode 100644
index 0000000..201eb0e
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for nginx_plus
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nginx_plus also supports the following:
+#
+# url: 'URL' # the URL to fetch nginx_plus's stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/status'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/status'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/status'
diff --git a/collectors/python.d.plugin/nsd/Makefile.inc b/collectors/python.d.plugin/nsd/Makefile.inc
new file mode 100644
index 0000000..58e9fd6
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nsd/nsd.chart.py
+dist_pythonconfig_DATA += nsd/nsd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nsd/README.md nsd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
new file mode 100644
index 0000000..b118657
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -0,0 +1,56 @@
+# nsd
+
+This module uses the `nsd-control stats_noreset` command to collect `nsd` statistics.
+
+**Requirements:**
+ * Version of `nsd` must be 4.0+
+ * Netdata must have permissions to run `nsd-control stats_noreset`
+
+It produces:
+
+1. **Queries**
+ * queries
+
+2. **Zones**
+ * master
+ * slave
+
+3. **Protocol**
+ * udp
+ * udp6
+ * tcp
+ * tcp6
+
+4. **Query Type**
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * HINFO
+ * MX
+ * NAPTR
+ * TXT
+ * AAAA
+ * SRV
+ * ANY
+
+5. **Transfer**
+ * NOTIFY
+ * AXFR
+
+6. **Return Code**
+ * NOERROR
+ * FORMERR
+ * SERVFAIL
+ * NXDOMAIN
+ * NOTIMP
+ * REFUSED
+ * YXDOMAIN
+
+
+Configuration is not needed.
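+
+If the `netdata` user cannot run `nsd-control` directly, a `sudo`-based job
+can be configured instead (this mirrors the example in `nsd.conf`; the path
+to `nsd-control` may differ on your system):
+
+```yaml
+local:
+  update_every: 30
+  command: 'sudo /usr/sbin/nsd-control stats_noreset'
+```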
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnsd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
new file mode 100644
index 0000000..77b0d7b
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+# Description: NSD `nsd-control stats_noreset` netdata python.d module
+# Author: <383c57 at gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+
+update_every = 30
+
+NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
+REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+
+ORDER = [
+ 'queries',
+ 'zones',
+ 'protocol',
+ 'type',
+ 'transfer',
+ 'rcode',
+]
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'queries', 'queries/s', 'queries', 'nsd.queries', 'line'],
+ 'lines': [
+ ['num_queries', 'queries', 'incremental']
+ ]
+ },
+ 'zones': {
+ 'options': [None, 'zones', 'zones', 'zones', 'nsd.zones', 'stacked'],
+ 'lines': [
+ ['zone_master', 'master', 'absolute'],
+ ['zone_slave', 'slave', 'absolute']
+ ]
+ },
+ 'protocol': {
+ 'options': [None, 'protocol', 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
+ 'lines': [
+ ['num_udp', 'udp', 'incremental'],
+ ['num_udp6', 'udp6', 'incremental'],
+ ['num_tcp', 'tcp', 'incremental'],
+ ['num_tcp6', 'tcp6', 'incremental']
+ ]
+ },
+ 'type': {
+ 'options': [None, 'query type', 'queries/s', 'query type', 'nsd.type', 'stacked'],
+ 'lines': [
+ ['num_type_A', 'A', 'incremental'],
+ ['num_type_NS', 'NS', 'incremental'],
+ ['num_type_CNAME', 'CNAME', 'incremental'],
+ ['num_type_SOA', 'SOA', 'incremental'],
+ ['num_type_PTR', 'PTR', 'incremental'],
+ ['num_type_HINFO', 'HINFO', 'incremental'],
+ ['num_type_MX', 'MX', 'incremental'],
+ ['num_type_NAPTR', 'NAPTR', 'incremental'],
+ ['num_type_TXT', 'TXT', 'incremental'],
+ ['num_type_AAAA', 'AAAA', 'incremental'],
+ ['num_type_SRV', 'SRV', 'incremental'],
+ ['num_type_TYPE255', 'ANY', 'incremental']
+ ]
+ },
+ 'transfer': {
+ 'options': [None, 'transfer', 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
+ 'lines': [
+ ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
+ ['num_type_TYPE252', 'AXFR', 'incremental']
+ ]
+ },
+ 'rcode': {
+ 'options': [None, 'return code', 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
+ 'lines': [
+ ['num_rcode_NOERROR', 'NOERROR', 'incremental'],
+ ['num_rcode_FORMERR', 'FORMERR', 'incremental'],
+ ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
+ ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
+ ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
+ ['num_rcode_REFUSED', 'REFUSED', 'incremental'],
+ ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = NSD_CONTROL_COMMAND
+
+ def _get_data(self):
+ lines = self._get_raw_data()
+ if not lines:
+ return None
+
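+ # nsd-control emits 'key=value' lines; dots in key names are replaced
+ # with underscores to form valid dimension ids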
+ stats = dict(
+ (k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines))
+ )
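+ # these counters only appear in the output once the corresponding
+ # event has been seen, so default them to zero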
+ stats.setdefault('num_opcode_NOTIFY', 0)
+ stats.setdefault('num_type_TYPE252', 0)
+ stats.setdefault('num_type_TYPE255', 0)
+
+ return stats
diff --git a/collectors/python.d.plugin/nsd/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf
new file mode 100644
index 0000000..77a8a31
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/nsd.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for nsd
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# nsd-control is slow, so once every 30 seconds
+# update_every: 30
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nsd also supports the following:
+#
+# command: 'nsd-control stats_noreset' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# IMPORTANT Information
+#
+# Netdata must have permissions to run `nsd-control stats_noreset` command
+#
+# - Example-1 (use "sudo")
+# 1. sudoers (e.g. visudo -f /etc/sudoers.d/netdata)
+# Defaults:netdata !requiretty
+# netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
+# 2. etc/netdata/python.d/nsd.conf
+# local:
+# update_every: 30
+# command: 'sudo /usr/sbin/nsd-control stats_noreset'
+#
+# - Example-2 (add "netdata" user to "nsd" group)
+# usermod -aG nsd netdata
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ update_every: 30
+ command: 'nsd-control stats_noreset'
diff --git a/collectors/python.d.plugin/ntpd/Makefile.inc b/collectors/python.d.plugin/ntpd/Makefile.inc
new file mode 100644
index 0000000..81210eb
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ntpd/ntpd.chart.py
+dist_pythonconfig_DATA += ntpd/ntpd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ntpd/README.md ntpd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
new file mode 100644
index 0000000..d33fd87
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -0,0 +1,73 @@
+# ntpd
+
+This module monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+
+**Requirements:**
+ * Version: `NTPv4`
+ * Local interrogation allowed in `/etc/ntp.conf` (default):
+
+```
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+```
+
+It produces:
+
+1. system
+ * offset
+ * jitter
+ * frequency
+ * delay
+ * dispersion
+ * stratum
+ * tc
+ * precision
+
+2. peers
+ * offset
+ * delay
+ * dispersion
+ * jitter
+ * rootdelay
+ * rootdispersion
+ * stratum
+ * hmode
+ * pmode
+ * hpoll
+ * ppoll
+ * precision
+
+### configuration
+
+Sample:
+
+```yaml
+update_every: 10
+
+host: 'localhost'
+port: '123'
+show_peers: yes
+# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
+peer_filter: '(127\..*)|(192\.168\..*)'
+# check for new/changed peers every 60 updates
+peer_rescan: 60
+```
+
+Sample (multiple jobs):
+
+Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
+
+```yaml
+local:
+ host: 'localhost'
+
+otherhost:
+ host: 'otherhost'
+```
+
+If no configuration is given, the module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show charts for the configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fntpd%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
new file mode 100644
index 0000000..5a5477e
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -0,0 +1,386 @@
+# -*- coding: utf-8 -*-
+# Description: ntpd netdata python.d module
+# Author: Sven Mäder (rda0)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import struct
+import re
+
+from bases.FrameworkServices.SocketService import SocketService
+
+
+# NTP Control Message Protocol constants
+MODE = 6
+HEADER_FORMAT = '!BBHHHHH'
+HEADER_LEN = 12
+OPCODES = {
+ 'readstat': 1,
+ 'readvar': 2
+}
+
+# Maximal dimension precision
+PRECISION = 1000000
+
+# Static charts
+ORDER = [
+ 'sys_offset',
+ 'sys_jitter',
+ 'sys_frequency',
+ 'sys_wander',
+ 'sys_rootdelay',
+ 'sys_rootdisp',
+ 'sys_stratum',
+ 'sys_tc',
+ 'sys_precision',
+ 'peer_offset',
+ 'peer_delay',
+ 'peer_dispersion',
+ 'peer_jitter',
+ 'peer_xleave',
+ 'peer_rootdelay',
+ 'peer_rootdisp',
+ 'peer_stratum',
+ 'peer_hmode',
+ 'peer_pmode',
+ 'peer_hpoll',
+ 'peer_ppoll',
+ 'peer_precision'
+]
+
+CHARTS = {
+ 'sys_offset': {
+ 'options': [None, 'Combined offset of server relative to this host', 'milliseconds',
+ 'system', 'ntpd.sys_offset', 'area'],
+ 'lines': [
+ ['offset', 'offset', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_jitter': {
+ 'options': [None, 'Combined system jitter and clock jitter', 'milliseconds',
+ 'system', 'ntpd.sys_jitter', 'line'],
+ 'lines': [
+ ['sys_jitter', 'system', 'absolute', 1, PRECISION],
+ ['clk_jitter', 'clock', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_frequency': {
+ 'options': [None, 'Frequency offset relative to hardware clock', 'ppm', 'system', 'ntpd.sys_frequency', 'area'],
+ 'lines': [
+ ['frequency', 'frequency', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_wander': {
+ 'options': [None, 'Clock frequency wander', 'ppm', 'system', 'ntpd.sys_wander', 'area'],
+ 'lines': [
+ ['clk_wander', 'clock', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_rootdelay': {
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'system',
+ 'ntpd.sys_rootdelay', 'area'],
+ 'lines': [
+ ['rootdelay', 'delay', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_rootdisp': {
+ 'options': [None, 'Total root dispersion to the primary reference clock', 'milliseconds', 'system',
+ 'ntpd.sys_rootdisp', 'area'],
+ 'lines': [
+ ['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_stratum': {
+ 'options': [None, 'Stratum (1-15)', 'stratum', 'system', 'ntpd.sys_stratum', 'line'],
+ 'lines': [
+ ['stratum', 'stratum', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_tc': {
+ 'options': [None, 'Time constant and poll exponent (3-17)', 'log2 s', 'system', 'ntpd.sys_tc', 'line'],
+ 'lines': [
+ ['tc', 'current', 'absolute', 1, PRECISION],
+ ['mintc', 'minimum', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'sys_precision': {
+ 'options': [None, 'Precision', 'log2 s', 'system', 'ntpd.sys_precision', 'line'],
+ 'lines': [
+ ['precision', 'precision', 'absolute', 1, PRECISION]
+ ]
+ }
+}
+
+PEER_CHARTS = {
+ 'peer_offset': {
+ 'options': [None, 'Filter offset', 'milliseconds', 'peers', 'ntpd.peer_offset', 'line'],
+ 'lines': []
+ },
+ 'peer_delay': {
+ 'options': [None, 'Filter delay', 'milliseconds', 'peers', 'ntpd.peer_delay', 'line'],
+ 'lines': []
+ },
+ 'peer_dispersion': {
+ 'options': [None, 'Filter dispersion', 'milliseconds', 'peers', 'ntpd.peer_dispersion', 'line'],
+ 'lines': []
+ },
+ 'peer_jitter': {
+ 'options': [None, 'Filter jitter', 'milliseconds', 'peers', 'ntpd.peer_jitter', 'line'],
+ 'lines': []
+ },
+ 'peer_xleave': {
+ 'options': [None, 'Interleave delay', 'milliseconds', 'peers', 'ntpd.peer_xleave', 'line'],
+ 'lines': []
+ },
+ 'peer_rootdelay': {
+ 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'peers',
+ 'ntpd.peer_rootdelay', 'line'],
+ 'lines': []
+ },
+ 'peer_rootdisp': {
+ 'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'peers',
+ 'ntpd.peer_rootdisp', 'line'],
+ 'lines': []
+ },
+ 'peer_stratum': {
+ 'options': [None, 'Stratum (1-15)', 'stratum', 'peers', 'ntpd.peer_stratum', 'line'],
+ 'lines': []
+ },
+ 'peer_hmode': {
+ 'options': [None, 'Host mode (1-6)', 'hmode', 'peers', 'ntpd.peer_hmode', 'line'],
+ 'lines': []
+ },
+ 'peer_pmode': {
+ 'options': [None, 'Peer mode (1-5)', 'pmode', 'peers', 'ntpd.peer_pmode', 'line'],
+ 'lines': []
+ },
+ 'peer_hpoll': {
+ 'options': [None, 'Host poll exponent', 'log2 s', 'peers', 'ntpd.peer_hpoll', 'line'],
+ 'lines': []
+ },
+ 'peer_ppoll': {
+ 'options': [None, 'Peer poll exponent', 'log2 s', 'peers', 'ntpd.peer_ppoll', 'line'],
+ 'lines': []
+ },
+ 'peer_precision': {
+ 'options': [None, 'Precision', 'log2 s', 'peers', 'ntpd.peer_precision', 'line'],
+ 'lines': []
+ }
+}
+
+
+class Base:
+ regex = re.compile(r'([a-z_]+)=((?:-)?[0-9]+(?:\.[0-9]+)?)')
+
+ @staticmethod
+ def get_header(associd=0, operation='readvar'):
+ """
+ Constructs the NTP Control Message header:
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |LI | VN |Mode |R|E|M| OpCode | Sequence Number |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Status | Association ID |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Offset | Count |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ """
+ version = 2
+ sequence = 1
+ status = 0
+ offset = 0
+ count = 0
+ header = struct.pack(HEADER_FORMAT, (version << 3 | MODE), OPCODES[operation],
+ sequence, status, associd, offset, count)
+ return header
+
+
+class System(Base):
+ def __init__(self):
+ self.request = self.get_header()
+
+ def get_data(self, raw):
+ """
+ Extracts key=value pairs with float/integer from ntp response packet data.
+ """
+ data = dict()
+ for key, value in self.regex.findall(raw):
+ data[key] = float(value) * PRECISION
+ return data
+
+
+class Peer(Base):
+ def __init__(self, idx, name):
+ self.id = idx
+ self.real_name = name
+ self.name = name.replace('.', '_')
+ self.request = self.get_header(self.id)
+
+ def get_data(self, raw):
+ """
+ Extracts key=value pairs with float/integer from ntp response packet data.
+ """
+ data = dict()
+ for key, value in self.regex.findall(raw):
+ dimension = '_'.join([self.name, key])
+ data[dimension] = float(value) * PRECISION
+ return data
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = dict(CHARTS)
+ self.port = 'ntp'
+ self.dgram_socket = True
+ self.system = System()
+ self.peers = dict()
+ self.request = str()
+ self.retries = 0
+ self.show_peers = self.configuration.get('show_peers', False)
+ self.peer_rescan = self.configuration.get('peer_rescan', 60)
+ if self.show_peers:
+ self.definitions.update(PEER_CHARTS)
+
+ def check(self):
+ """
+ Checks if we can get valid systemvars.
+ If not, returns None to disable module.
+ """
+ self._parse_config()
+
+ peer_filter = self.configuration.get('peer_filter', r'127\..*')
+ try:
+ self.peer_filter = re.compile(r'^((0\.0\.0\.0)|({0}))$'.format(peer_filter))
+ except re.error as error:
+ self.error('Compile pattern error (peer_filter) : {0}'.format(error))
+ return None
+
+ self.request = self.system.request
+ raw_systemvars = self._get_raw_data()
+
+ if not self.system.get_data(raw_systemvars):
+ return None
+
+ return True
+
+ def get_data(self):
+ """
+ Gets systemvars data on each update.
+ Gets peervars data for all peers on each update.
+ """
+ data = dict()
+
+ self.request = self.system.request
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ data.update(self.system.get_data(raw))
+
+ if not self.show_peers:
+ return data
+
+ if not self.peers or self.runs_counter % self.peer_rescan == 0 or self.retries > 8:
+ self.find_new_peers()
+
+ for peer in self.peers.values():
+ self.request = peer.request
+ peer_data = peer.get_data(self._get_raw_data())
+ if peer_data:
+ data.update(peer_data)
+ else:
+ self.retries += 1
+
+ return data
+
+ def find_new_peers(self):
+ new_peers = dict((p.real_name, p) for p in self.get_peers())
+ if new_peers:
+
+ peers_to_remove = set(self.peers) - set(new_peers)
+ peers_to_add = set(new_peers) - set(self.peers)
+
+ for peer_name in peers_to_remove:
+ self.hide_old_peer_from_charts(self.peers[peer_name])
+ del self.peers[peer_name]
+
+ for peer_name in peers_to_add:
+ self.add_new_peer_to_charts(new_peers[peer_name])
+
+ self.peers.update(new_peers)
+ self.retries = 0
+
+ def add_new_peer_to_charts(self, peer):
+ for chart_id in set(self.charts.charts) & set(PEER_CHARTS):
+ dim_id = peer.name + chart_id[4:]
+ if dim_id not in self.charts[chart_id]:
+ self.charts[chart_id].add_dimension([dim_id, peer.real_name, 'absolute', 1, PRECISION])
+ else:
+ self.charts[chart_id].hide_dimension(dim_id, reverse=True)
+
+ def hide_old_peer_from_charts(self, peer):
+ for chart_id in set(self.charts.charts) & set(PEER_CHARTS):
+ dim_id = peer.name + chart_id[4:]
+ self.charts[chart_id].hide_dimension(dim_id)
+
+ def get_peers(self):
+ self.request = Base.get_header(operation='readstat')
+
+ raw_data = self._get_raw_data(raw=True)
+ if not raw_data:
+ return list()
+
+ peer_ids = self.get_peer_ids(raw_data)
+ if not peer_ids:
+ return list()
+
+ new_peers = list()
+ for peer_id in peer_ids:
+ self.request = Base.get_header(peer_id)
+ raw_peer_data = self._get_raw_data()
+ if not raw_peer_data:
+ continue
+ srcadr = re.search(r'(srcadr)=([^,]+)', raw_peer_data)
+ if not srcadr:
+ continue
+ srcadr = srcadr.group(2)
+ if self.peer_filter.search(srcadr):
+ continue
+ stratum = re.search(r'(stratum)=([^,]+)', raw_peer_data)
+ if not stratum:
+ continue
+ if int(stratum.group(2)) > 15:
+ continue
+
+ new_peer = Peer(idx=peer_id, name=srcadr)
+ new_peers.append(new_peer)
+ return new_peers
+
+ def get_peer_ids(self, res):
+ """
+ Unpack the NTP Control Message header
+ Get data length from header
+ Get list of association ids returned in the readstat response
+ """
+
+ try:
+ count = struct.unpack(HEADER_FORMAT, res[:HEADER_LEN])[6]
+ except struct.error as error:
+ self.error('error unpacking header: {0}'.format(error))
+ return None
+ if not count:
+ self.error('empty data field in NTP control packet')
+ return None
+
+ data_end = HEADER_LEN + count
+ data = res[HEADER_LEN:data_end]
+ data_format = ''.join(['!', 'H' * int(count / 2)])
+ try:
+ peer_ids = list(struct.unpack(data_format, data))[::2]
+ except struct.error as error:
+ self.error('error unpacking data: {0}'.format(error))
+ return None
+ return peer_ids
diff --git a/collectors/python.d.plugin/ntpd/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf
new file mode 100644
index 0000000..80bd468
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/ntpd.conf
@@ -0,0 +1,89 @@
+# netdata python.d.plugin configuration for ntpd
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear on the
+# # dashboard (by default it is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+#
+# In addition to the above, ntpd also supports the following:
+#
+# host: 'localhost' # the host to query
+# port: '123' # the UDP port where `ntpd` listens
+# show_peers: no # use `yes` to show peer charts. Enabling this
+# # option is recommended only for debugging, as
+# # it may leak memory if the peers
+# # change frequently.
+# peer_filter: '127\..*' # regex to exclude peers
+# # by default local peers are hidden
+# # use `''` to show all peers.
+# peer_rescan: 60 # interval (>0) to check for new/changed peers
+# # use `1` to check on every update
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: '123'
+ show_peers: no
+
+localhost_ipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: '123'
+ show_peers: no
+
+localhost_ipv6:
+ name: 'local'
+ host: '::1'
+ port: '123'
+ show_peers: no
diff --git a/collectors/python.d.plugin/nvidia_smi/Makefile.inc b/collectors/python.d.plugin/nvidia_smi/Makefile.inc
new file mode 100644
index 0000000..c23bd25
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nvidia_smi/nvidia_smi.chart.py
+dist_pythonconfig_DATA += nvidia_smi/nvidia_smi.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nvidia_smi/README.md nvidia_smi/Makefile.inc
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
new file mode 100644
index 0000000..48b6119
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/README.md
@@ -0,0 +1,40 @@
+# nvidia_smi
+
+This module monitors the `nvidia-smi` cli tool.
+
+**Requirements and Notes:**
+
+ * You must have the `nvidia-smi` tool installed and your NVIDIA GPU(s) must support the tool. It is mostly supported by the newer high-end models used for AI/ML and crypto mining, and by the professional range; read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
+
+ * You must enable this plugin, as it is disabled by default due to minor performance issues.
+
+ * On some systems, when the GPU is idle, the `nvidia-smi` tool unloads, adding latency when it is next queried. If your GPUs run under a constant workload, this is unlikely to be an issue.
+
+ * Currently the `nvidia-smi` tool is queried via the CLI (see the sketch after this list). Updating the plugin to use the NVIDIA C/C++ API directly should resolve this issue. See the discussion here: https://github.com/netdata/netdata/pull/4357
+
+ * Contributions are welcome.
+
+ * Make sure the `netdata` user can execute `/usr/bin/nvidia-smi`, or wherever your binary is installed.
+
+ * `poll_seconds` is how often, in seconds, the tool is polled (an integer).
+
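+A minimal sketch of the query and XML parsing the module performs, assuming `nvidia-smi` is on the `PATH` (names below are illustrative, not the module's API):
+
+```python
+import subprocess
+import xml.etree.ElementTree as et
+
+# one-shot query with XML output (the same flags the module uses)
+xml_out = subprocess.Popen(['nvidia-smi', '-x', '-q'],
+                           stdout=subprocess.PIPE).communicate()[0]
+
+root = et.fromstring(xml_out)
+for idx, gpu in enumerate(root.findall('gpu')):
+    temp = gpu.find('temperature').find('gpu_temp').text.split()[0]
+    print('gpu{0} temperature: {1}'.format(idx, temp))
+```
+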
+It produces:
+
+1. Per GPU
+ * GPU utilization
+ * memory allocation
+ * memory utilization
+ * fan speed
+ * power usage
+ * temperature
+ * clock speed
+ * PCI bandwidth
+
+### configuration
+
+Sample:
+
+```yaml
+poll_seconds: 1
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnvidia_smi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
new file mode 100644
index 0000000..7cb816c
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
@@ -0,0 +1,374 @@
+# -*- coding: utf-8 -*-
+# Description: nvidia-smi netdata python.d module
+# Original Author: Steven Noonan (tycho)
+# Author: Ilya Mashchenko (l2isbad)
+
+import subprocess
+import threading
+import xml.etree.ElementTree as et
+
+from bases.collection import find_binary
+from bases.FrameworkServices.SimpleService import SimpleService
+
+disabled_by_default = True
+
+
+NVIDIA_SMI = 'nvidia-smi'
+
+BAD_VALUE = 'N/A'
+
+EMPTY_ROW = ''
+EMPTY_ROW_LIMIT = 500
+POLLER_BREAK_ROW = '</nvidia_smi_log>'
+
+PCI_BANDWIDTH = 'pci_bandwidth'
+FAN_SPEED = 'fan_speed'
+GPU_UTIL = 'gpu_utilization'
+MEM_UTIL = 'mem_utilization'
+ENCODER_UTIL = 'encoder_utilization'
+MEM_ALLOCATED = 'mem_allocated'
+TEMPERATURE = 'temperature'
+CLOCKS = 'clocks'
+POWER = 'power'
+
+ORDER = [
+ PCI_BANDWIDTH,
+ FAN_SPEED,
+ GPU_UTIL,
+ MEM_UTIL,
+ ENCODER_UTIL,
+ MEM_ALLOCATED,
+ TEMPERATURE,
+ CLOCKS,
+ POWER,
+]
+
+
+def gpu_charts(gpu):
+ fam = gpu.full_name()
+
+ charts = {
+ PCI_BANDWIDTH: {
+ 'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
+ 'lines': [
+ ['rx_util', 'rx', 'absolute', 1, 1],
+ ['tx_util', 'tx', 'absolute', 1, -1],
+ ]
+ },
+ FAN_SPEED: {
+ 'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
+ 'lines': [
+ ['fan_speed', 'speed'],
+ ]
+ },
+ GPU_UTIL: {
+ 'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'],
+ 'lines': [
+ ['gpu_util', 'utilization'],
+ ]
+ },
+ MEM_UTIL: {
+ 'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'],
+ 'lines': [
+ ['memory_util', 'utilization'],
+ ]
+ },
+ ENCODER_UTIL: {
+ 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization', 'line'],
+ 'lines': [
+ ['encoder_util', 'encoder'],
+ ['decoder_util', 'decoder'],
+ ]
+ },
+ MEM_ALLOCATED: {
+ 'options': [None, 'Memory Allocated', 'MiB', fam, 'nvidia_smi.memory_allocated', 'line'],
+ 'lines': [
+ ['fb_memory_usage', 'used'],
+ ]
+ },
+ TEMPERATURE: {
+ 'options': [None, 'Temperature', 'celsius', fam, 'nvidia_smi.temperature', 'line'],
+ 'lines': [
+ ['gpu_temp', 'temp'],
+ ]
+ },
+ CLOCKS: {
+ 'options': [None, 'Clock Frequencies', 'MHz', fam, 'nvidia_smi.clocks', 'line'],
+ 'lines': [
+ ['graphics_clock', 'graphics'],
+ ['video_clock', 'video'],
+ ['sm_clock', 'sm'],
+ ['mem_clock', 'mem'],
+ ]
+ },
+ POWER: {
+ 'options': [None, 'Power Utilization', 'Watts', fam, 'nvidia_smi.power', 'line'],
+ 'lines': [
+ ['power_draw', 'power', 1, 100],
+ ]
+ },
+ }
+
+ idx = gpu.num
+
+ order = ['gpu{0}_{1}'.format(idx, v) for v in ORDER]
+ charts = dict(('gpu{0}_{1}'.format(idx, k), v) for k, v in charts.items())
+
+ for chart in charts.values():
+ for line in chart['lines']:
+ line[0] = 'gpu{0}_{1}'.format(idx, line[0])
+
+ return order, charts
+
+
+class NvidiaSMI:
+ def __init__(self):
+ self.command = find_binary(NVIDIA_SMI)
+ self.active_proc = None
+
+ def run_once(self):
+ proc = subprocess.Popen([self.command, '-x', '-q'], stdout=subprocess.PIPE)
+ stdout, _ = proc.communicate()
+ return stdout
+
+ def run_loop(self, interval):
+ if self.active_proc:
+ self.kill()
+ proc = subprocess.Popen([self.command, '-x', '-q', '-l', str(interval)], stdout=subprocess.PIPE)
+ self.active_proc = proc
+ return proc.stdout
+
+ def kill(self):
+ if self.active_proc:
+ self.active_proc.kill()
+ self.active_proc = None
+
+
+class NvidiaSMIPoller(threading.Thread):
+ def __init__(self, poll_interval):
+ threading.Thread.__init__(self)
+ self.daemon = True
+
+ self.smi = NvidiaSMI()
+ self.interval = poll_interval
+
+ self.lock = threading.RLock()
+ self.last_data = str()
+ self.exit = False
+ self.empty_rows = 0
+ self.rows = list()
+
+ def has_smi(self):
+ return bool(self.smi.command)
+
+ def run_once(self):
+ return self.smi.run_once()
+
+ def run(self):
+ out = self.smi.run_loop(self.interval)
+
+ for row in out:
+ if self.exit or self.empty_rows > EMPTY_ROW_LIMIT:
+ break
+ self.process_row(row)
+ self.smi.kill()
+
+ def process_row(self, row):
+ row = row.decode()
+ self.empty_rows += (row == EMPTY_ROW)
+ self.rows.append(row)
+
+ if POLLER_BREAK_ROW in row:
+ self.lock.acquire()
+ self.last_data = '\n'.join(self.rows)
+ self.lock.release()
+
+ self.rows = list()
+ self.empty_rows = 0
+
+ def is_started(self):
+ return self.ident is not None
+
+ def shutdown(self):
+ self.exit = True
+
+ def data(self):
+ self.lock.acquire()
+ data = self.last_data
+ self.lock.release()
+ return data
+
+
+def handle_attr_error(method):
+ def on_call(*args, **kwargs):
+ try:
+ return method(*args, **kwargs)
+ except AttributeError:
+ return None
+ return on_call
+
+
+def handle_value_error(method):
+ def on_call(*args, **kwargs):
+ try:
+ return method(*args, **kwargs)
+ except ValueError:
+ return None
+ return on_call
+
+
+class GPU:
+ def __init__(self, num, root):
+ self.num = num
+ self.root = root
+
+ def id(self):
+ return self.root.get('id')
+
+ def name(self):
+ return self.root.find('product_name').text
+
+ def full_name(self):
+ return 'gpu{0} {1}'.format(self.num, self.name())
+
+ @handle_attr_error
+ def rx_util(self):
+ return self.root.find('pci').find('rx_util').text.split()[0]
+
+ @handle_attr_error
+ def tx_util(self):
+ return self.root.find('pci').find('tx_util').text.split()[0]
+
+ @handle_attr_error
+ def fan_speed(self):
+ return self.root.find('fan_speed').text.split()[0]
+
+ @handle_attr_error
+ def gpu_util(self):
+ return self.root.find('utilization').find('gpu_util').text.split()[0]
+
+ @handle_attr_error
+ def memory_util(self):
+ return self.root.find('utilization').find('memory_util').text.split()[0]
+
+ @handle_attr_error
+ def encoder_util(self):
+ return self.root.find('utilization').find('encoder_util').text.split()[0]
+
+ @handle_attr_error
+ def decoder_util(self):
+ return self.root.find('utilization').find('decoder_util').text.split()[0]
+
+ @handle_attr_error
+ def fb_memory_usage(self):
+ return self.root.find('fb_memory_usage').find('used').text.split()[0]
+
+ @handle_attr_error
+ def temperature(self):
+ return self.root.find('temperature').find('gpu_temp').text.split()[0]
+
+ @handle_attr_error
+ def graphics_clock(self):
+ return self.root.find('clocks').find('graphics_clock').text.split()[0]
+
+ @handle_attr_error
+ def video_clock(self):
+ return self.root.find('clocks').find('video_clock').text.split()[0]
+
+ @handle_attr_error
+ def sm_clock(self):
+ return self.root.find('clocks').find('sm_clock').text.split()[0]
+
+ @handle_attr_error
+ def mem_clock(self):
+ return self.root.find('clocks').find('mem_clock').text.split()[0]
+
+ @handle_value_error
+ @handle_attr_error
+ def power_draw(self):
+ return float(self.root.find('power_readings').find('power_draw').text.split()[0]) * 100
+
+ def data(self):
+ data = {
+ 'rx_util': self.rx_util(),
+ 'tx_util': self.tx_util(),
+ 'fan_speed': self.fan_speed(),
+ 'gpu_util': self.gpu_util(),
+ 'memory_util': self.memory_util(),
+ 'encoder_util': self.encoder_util(),
+ 'decoder_util': self.decoder_util(),
+ 'fb_memory_usage': self.fb_memory_usage(),
+ 'gpu_temp': self.temperature(),
+ 'graphics_clock': self.graphics_clock(),
+ 'video_clock': self.video_clock(),
+ 'sm_clock': self.sm_clock(),
+ 'mem_clock': self.mem_clock(),
+ 'power_draw': self.power_draw(),
+ }
+
+ return dict(
+ ('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items() if v is not None and v != BAD_VALUE
+ )
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ poll = int(configuration.get('poll_seconds', 1))
+ self.poller = NvidiaSMIPoller(poll)
+
+ def get_data(self):
+ if not self.poller.is_alive():
+ self.debug('poller is off')
+ return None
+
+ last_data = self.poller.data()
+
+ parsed = self.parse_xml(last_data)
+ if parsed is None:
+ return None
+
+ data = dict()
+ for idx, root in enumerate(parsed.findall('gpu')):
+ data.update(GPU(idx, root).data())
+
+ return data or None
+
+ def check(self):
+ if not self.poller.has_smi():
+ self.error("couldn't find '{0}' binary".format(NVIDIA_SMI))
+ return False
+
+ raw_data = self.poller.run_once()
+ if not raw_data:
+ self.error("failed to invoke '{0}' binary".format(NVIDIA_SMI))
+ return False
+
+ parsed = self.parse_xml(raw_data)
+ if parsed is None:
+ return False
+
+ gpus = parsed.findall('gpu')
+ if not gpus:
+ return False
+
+ self.create_charts(gpus)
+ self.poller.start()
+
+ return True
+
+ def parse_xml(self, data):
+ try:
+ return et.fromstring(data)
+ except et.ParseError as error:
+ self.error(error)
+
+ return None
+
+ def create_charts(self, gpus):
+ for idx, root in enumerate(gpus):
+ order, charts = gpu_charts(GPU(idx, root))
+ self.order.extend(order)
+ self.definitions.update(charts)
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
new file mode 100644
index 0000000..53e544a
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for nvidia_smi
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear on the
+# # dashboard (by default it is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nvidia_smi also supports the following:
+#
+# poll_seconds: SECONDS # default is 1. Sets how often, in seconds, the nvidia-smi tool is polled.
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/openldap/Makefile.inc b/collectors/python.d.plugin/openldap/Makefile.inc
new file mode 100644
index 0000000..dc947e2
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += openldap/openldap.chart.py
+dist_pythonconfig_DATA += openldap/openldap.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += openldap/README.md openldap/Makefile.inc
+
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
new file mode 100644
index 0000000..629cc15
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/README.md
@@ -0,0 +1,59 @@
+# openldap
+
+This module provides statistics from an OpenLDAP (slapd) server.
+Statistics are taken from the LDAP monitoring interface; see the slapd-monitor(5) manual page.
+
+**Requirements:**
+* Follow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.
+* Install the python-ldap module: `pip install python-ldap` or `yum install python-ldap`
+* Modify openldap.conf with your credentials
+
+### The module provides the following charts:
+
+1. **connections**
+ * total connections number
+
+2. **Bytes**
+ * sent
+
+3. **operations**
+ * completed
+ * initiated
+
+4. **referrals**
+ * sent
+
+5. **entries**
+ * sent
+
+6. **ldap operations**
+ * bind
+ * search
+ * unbind
+ * add
+ * delete
+ * modify
+ * compare
+
+7. **waiters**
+ * read
+ * write
+
+
+
+### configuration
+
+Sample:
+
+```yaml
+openldap:
+ name : 'local'
+ username : "cn=monitor,dc=superb,dc=eu"
+ password : "testpass"
+ server : 'localhost'
+ port : 389
+```
+
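+Under the hood, each metric is a base-scope search against the `cn=Monitor` subtree. A minimal sketch using python-ldap, reusing the sample's placeholder credentials:
+
+```python
+import ldap
+
+conn = ldap.initialize('ldap://localhost:389')
+conn.set_option(ldap.OPT_NETWORK_TIMEOUT, 1)
+conn.simple_bind('cn=monitor,dc=superb,dc=eu', 'testpass')
+
+# one base-scope search per metric, e.g. total connections
+msgid = conn.search('cn=Total,cn=Connections,cn=Monitor',
+                    ldap.SCOPE_BASE, 'objectClass=*', ['monitorCounter'])
+result_type, result_data = conn.result(msgid, 1)
+print(result_data)  # [(dn, {'monitorCounter': [value]})]
+```
+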
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fopenldap%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/openldap/openldap.chart.py b/collectors/python.d.plugin/openldap/openldap.chart.py
new file mode 100644
index 0000000..768ed01
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/openldap.chart.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+# Description: openldap netdata python.d module
+# Author: Manolis Kartsonakis (ekartsonakis)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import ldap
+ HAS_LDAP = True
+except ImportError:
+ HAS_LDAP = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+DEFAULT_SERVER = 'localhost'
+DEFAULT_PORT = '389'
+DEFAULT_TIMEOUT = 1
+
+ORDER = [
+ 'total_connections',
+ 'bytes_sent',
+ 'operations',
+ 'referrals_sent',
+ 'entries_sent',
+ 'ldap_operations',
+ 'waiters'
+]
+
+CHARTS = {
+ 'total_connections': {
+ 'options': [None, 'Total Connections', 'connections/s', 'ldap', 'openldap.total_connections', 'line'],
+ 'lines': [
+ ['total_connections', 'connections', 'incremental']
+ ]
+ },
+ 'bytes_sent': {
+ 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'],
+ 'lines': [
+ ['bytes_sent', 'sent', 'incremental', 1, 1024]
+ ]
+ },
+ 'operations': {
+ 'options': [None, 'Operations Status', 'ops/s', 'ldap', 'openldap.operations_status', 'line'],
+ 'lines': [
+ ['completed_operations', 'completed', 'incremental'],
+ ['initiated_operations', 'initiated', 'incremental']
+ ]
+ },
+ 'referrals_sent': {
+        'options': [None, 'Referrals', 'referrals/s', 'ldap', 'openldap.referrals', 'line'],
+ 'lines': [
+ ['referrals_sent', 'sent', 'incremental']
+ ]
+ },
+ 'entries_sent': {
+ 'options': [None, 'Entries', 'entries/s', 'ldap', 'openldap.entries', 'line'],
+ 'lines': [
+ ['entries_sent', 'sent', 'incremental']
+ ]
+ },
+ 'ldap_operations': {
+ 'options': [None, 'Operations', 'ops/s', 'ldap', 'openldap.ldap_operations', 'line'],
+ 'lines': [
+ ['bind_operations', 'bind', 'incremental'],
+ ['search_operations', 'search', 'incremental'],
+ ['unbind_operations', 'unbind', 'incremental'],
+ ['add_operations', 'add', 'incremental'],
+ ['delete_operations', 'delete', 'incremental'],
+ ['modify_operations', 'modify', 'incremental'],
+ ['compare_operations', 'compare', 'incremental']
+ ]
+ },
+ 'waiters': {
+ 'options': [None, 'Waiters', 'waiters/s', 'ldap', 'openldap.waiters', 'line'],
+ 'lines': [
+ ['write_waiters', 'write', 'incremental'],
+ ['read_waiters', 'read', 'incremental']
+ ]
+ },
+}
+
+# Stuff to gather - tuples of (DN, attribute) to query
+SEARCH_LIST = {
+ 'total_connections': (
+ 'cn=Total,cn=Connections,cn=Monitor', 'monitorCounter',
+ ),
+ 'bytes_sent': (
+ 'cn=Bytes,cn=Statistics,cn=Monitor', 'monitorCounter',
+ ),
+ 'completed_operations': (
+ 'cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'initiated_operations': (
+ 'cn=Operations,cn=Monitor', 'monitorOpInitiated',
+ ),
+ 'referrals_sent': (
+ 'cn=Referrals,cn=Statistics,cn=Monitor', 'monitorCounter',
+ ),
+ 'entries_sent': (
+ 'cn=Entries,cn=Statistics,cn=Monitor', 'monitorCounter',
+ ),
+ 'bind_operations': (
+ 'cn=Bind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'unbind_operations': (
+ 'cn=Unbind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'add_operations': (
+ 'cn=Add,cn=Operations,cn=Monitor', 'monitorOpInitiated',
+ ),
+ 'delete_operations': (
+ 'cn=Delete,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'modify_operations': (
+ 'cn=Modify,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'compare_operations': (
+ 'cn=Compare,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'search_operations': (
+ 'cn=Search,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'write_waiters': (
+ 'cn=Write,cn=Waiters,cn=Monitor', 'monitorCounter',
+ ),
+ 'read_waiters': (
+ 'cn=Read,cn=Waiters,cn=Monitor', 'monitorCounter',
+ ),
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.server = configuration.get('server', DEFAULT_SERVER)
+ self.port = configuration.get('port', DEFAULT_PORT)
+ self.username = configuration.get('username')
+ self.password = configuration.get('password')
+ self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
+ self.alive = False
+ self.conn = None
+
+ def disconnect(self):
+ if self.conn:
+ self.conn.unbind()
+ self.conn = None
+ self.alive = False
+
+ def connect(self):
+ try:
+ self.conn = ldap.initialize('ldap://%s:%s' % (self.server, self.port))
+ self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, self.timeout)
+ if self.username and self.password:
+ self.conn.simple_bind(self.username, self.password)
+ except ldap.LDAPError as error:
+ self.error(error)
+ return False
+
+ self.alive = True
+ return True
+
+ def reconnect(self):
+ self.disconnect()
+ return self.connect()
+
+ def check(self):
+ if not HAS_LDAP:
+ self.error("'python-ldap' package is needed")
+ return None
+
+ return self.connect() and self.get_data()
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ data = dict()
+        for key in SEARCH_LIST:
+            dn, attr = SEARCH_LIST[key]
+            try:
+                num = self.conn.search(dn, ldap.SCOPE_BASE, 'objectClass=*', [attr])
+                result_type, result_data = self.conn.result(num, 1)
+            except ldap.LDAPError as error:
+                self.error('Empty result. Check bind username/password. Message: {0}'.format(error))
+                self.alive = False
+                return None
+
+            # skip anything that is not a searchResDone (101) with data,
+            # so `val` is never referenced unbound
+            if result_type != 101 or not result_data:
+                continue
+
+            try:
+                # list() keeps this working on python3, where dict.values()
+                # returns a non-subscriptable view
+                val = int(list(result_data[0][1].values())[0][0])
+            except (ValueError, IndexError) as error:
+                self.debug(error)
+                continue
+
+            data[key] = val
+
+ return data
diff --git a/collectors/python.d.plugin/openldap/openldap.conf b/collectors/python.d.plugin/openldap/openldap.conf
new file mode 100644
index 0000000..6182b3e
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/openldap.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for openldap
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# openldap is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear on the
+# # dashboard (by default it is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# ----------------------------------------------------------------------
+# OPENLDAP EXTRA PARAMETERS
+
+# Set here your LDAP connection settings
+
+#username : "cn=admin,dc=example,dc=com" # The bind user with rights to access monitor statistics
+#password : "yourpass" # The password for the bound user
+#server : 'localhost' # The listening address of the LDAP server
+#port : 389 # The listening port of the LDAP server
+#timeout : 1 # Seconds to timeout if no connection exists
\ No newline at end of file
diff --git a/collectors/python.d.plugin/ovpn_status_log/Makefile.inc b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
new file mode 100644
index 0000000..1fbc506
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ovpn_status_log/ovpn_status_log.chart.py
+dist_pythonconfig_DATA += ovpn_status_log/ovpn_status_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ovpn_status_log/README.md ovpn_status_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
new file mode 100644
index 0000000..bcd1f00
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -0,0 +1,34 @@
+# ovpn_status_log
+
+This module monitors the openvpn-status log file.
+
+**Requirements:**
+
+ * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT THE DIRECTIVES which create output files
+ so that multiple instances do not overwrite each other's output files.
+
+ * Make sure the NETDATA USER CAN READ openvpn-status.log
+
+ * The update_every interval MUST MATCH the interval at which OpenVPN writes operational status to the log file.
+
+It produces:
+
+1. **Users** OpenVPN active users
+ * users
+
+2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s
+ * in
+ * out
+
+### configuration
+
+Sample:
+
+```yaml
+default:
+ log_path : '/var/log/openvpn-status.log'
+```
+
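+For TLS-mode servers, the module counts clients and sums byte counters by matching each status-log line against a regex. A minimal sketch, with an illustrative log line:
+
+```python
+import re
+
+# the module's TLS-mode pattern: a client address followed by two byte counters
+TLS_REGEX = re.compile(
+    r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) '
+    r'(?P<bytes_in>\d+) (?P<bytes_out>\d+)'
+)
+
+line = 'client1 203.0.113.10:49502 334948 1973012 Thu Jun 18 04:08:39 2015'
+print(TLS_REGEX.search(line).groupdict())
+# {'bytes_in': '334948', 'bytes_out': '1973012'}
+```
+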
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fovpn_status_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
new file mode 100644
index 0000000..dc7a600
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+# Description: openvpn status log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+update_every = 10
+
+ORDER = [
+ 'users',
+ 'traffic',
+]
+
+CHARTS = {
+ 'users': {
+ 'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
+ 'lines': [
+ ['users', None, 'absolute'],
+ ]
+ },
+ 'traffic': {
+ 'options': [None, 'OpenVPN Traffic', 'KiB/s', 'traffic', 'openvpn_status.traffic', 'area'],
+ 'lines': [
+ ['bytes_in', 'in', 'incremental', 1, 1 << 10],
+ ['bytes_out', 'out', 'incremental', -1, 1 << 10]
+ ]
+ }
+}
+
+TLS_REGEX = re.compile(
+ r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)'
+)
+STATIC_KEY_REGEX = re.compile(
+ r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'
+)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.log_path = self.configuration.get('log_path')
+ self.regex = {
+ 'tls': TLS_REGEX,
+ 'static_key': STATIC_KEY_REGEX
+ }
+
+ def check(self):
+ if not (self.log_path and isinstance(self.log_path, str)):
+ self.error("'log_path' is not defined")
+ return False
+
+ data = self._get_raw_data()
+ if not data:
+ self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
+ return None
+
+ found = None
+ for row in data:
+ if 'ROUTING' in row:
+ self.get_data = self.get_data_tls
+ found = True
+ break
+ elif 'STATISTICS' in row:
+ self.get_data = self.get_data_static_key
+ found = True
+ break
+ if found:
+ return True
+        self.error('Failed to parse openvpn log file')
+ return False
+
+ def _get_raw_data(self):
+ """
+ Open log file
+ :return: str
+ """
+
+ try:
+ with open(self.log_path) as log:
+ raw_data = log.readlines() or None
+ except OSError:
+ return None
+ else:
+ return raw_data
+
+ def get_data_static_key(self):
+ """
+ Parse openvpn-status log file.
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = dict(bytes_in=0, bytes_out=0)
+
+ for row in raw_data:
+ match = self.regex['static_key'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['direction'] == 'read':
+ data['bytes_in'] += int(match['bytes'])
+ else:
+ data['bytes_out'] += int(match['bytes'])
+
+ return data or None
+
+ def get_data_tls(self):
+ """
+ Parse openvpn-status log file.
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = dict(users=0, bytes_in=0, bytes_out=0)
+ for row in raw_data:
+ columns = row.split(',') if ',' in row else row.split()
+ if 'UNDEF' in columns:
+ # see https://openvpn.net/archive/openvpn-users/2004-08/msg00116.html
+ continue
+
+ match = self.regex['tls'].search(' '.join(columns))
+ if match:
+ match = match.groupdict()
+ data['users'] += 1
+ data['bytes_in'] += int(match['bytes_in'])
+ data['bytes_out'] += int(match['bytes_out'])
+
+ return data or None
diff --git a/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
new file mode 100644
index 0000000..1d71f6b
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
@@ -0,0 +1,97 @@
+# netdata python.d.plugin configuration for openvpn status log
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear on the
+# # dashboard (by default it is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, openvpn status log also supports the following:
+#
+# log_path: 'PATH' # the path to openvpn status log file
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+# IMPORTANT information
+#
+# 1. If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT THE DIRECTIVES which create output files
+# so that multiple instances do not overwrite each other's output files.
+# 2. Make sure the NETDATA USER CAN READ openvpn-status.log
+#
+# * cd into directory with openvpn-status.log and run the following commands as root
+# * #chown :netdata openvpn-status.log && chmod 640 openvpn-status.log
+# * To check permission and group membership run
+# * #ls -l openvpn-status.log
+# -rw-r----- 1 root netdata 359 dec 21 21:22 openvpn-status.log
+#
+# 3. The update_every interval MUST MATCH the interval at which OpenVPN writes operational status to the log file.
+# If it does not, the traffic chart WILL DISPLAY WRONG values
+#
+# The default OpenVPN update interval is 10 seconds on Debian 8
+# # ps -C openvpn -o command=
+# /usr/sbin/openvpn --daemon ovpn-server --status /run/openvpn/server.status 10 --cd /etc/openvpn --config /etc/openvpn/server.conf
+#
+# 4. Confirm status is configured in your OpenVPN configuration.
+# * Open the OpenVPN config in an editor (e.g. sudo nano /etc/openvpn/default.conf)
+# * Confirm status is enabled with a line like the one below:
+# status /var/log/openvpn-status.log
+#
+#default:
+# log_path: '/var/log/openvpn-status.log'
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/phpfpm/Makefile.inc b/collectors/python.d.plugin/phpfpm/Makefile.inc
new file mode 100644
index 0000000..ff312fe
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += phpfpm/phpfpm.chart.py
+dist_pythonconfig_DATA += phpfpm/phpfpm.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += phpfpm/README.md phpfpm/Makefile.inc
+
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
new file mode 100644
index 0000000..d3aa85a
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -0,0 +1,41 @@
+# phpfpm
+
+This module will monitor one or more php-fpm instances depending on configuration.
+
+**Requirements:**
+ * php-fpm with enabled `status` page
+ * access to `status` page via web server
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+ * maxActive
+ * idle
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Performance**
+ * reached
+ * slow
+
+### configuration
+
+It needs only the `url` of the server's `status` page.
+
+Here is an example for local instance:
+
+```yaml
+update_every : 3
+priority : 90100
+
+local:
+ url : 'http://localhost/status'
+```
+
+Without configuration, the module attempts to connect to `http://localhost/status`.
+
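+A minimal sketch of what the module fetches and parses, assuming the default status URL; `active processes` and `accepted conn` are two of the pool fields it reads:
+
+```python
+import json
+
+try:
+    from urllib.request import urlopen  # python3
+except ImportError:
+    from urllib2 import urlopen  # python2
+
+raw = urlopen('http://localhost/status?full&json').read()
+status = json.loads(raw.decode('utf-8'))
+
+# pool counters the module charts:
+print(status['active processes'], status['accepted conn'])
+```
+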
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fphpfpm%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
new file mode 100644
index 0000000..70091e2
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+# Description: PHP-FPM netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+import re
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
+
+POOL_INFO = [
+ ('active processes', 'active'),
+ ('max active processes', 'maxActive'),
+ ('idle processes', 'idle'),
+ ('accepted conn', 'requests'),
+ ('max children reached', 'reached'),
+ ('slow requests', 'slow')
+]
+
+PER_PROCESS_INFO = [
+ ('request duration', 'ReqDur'),
+ ('last request cpu', 'ReqCpu'),
+ ('last request memory', 'ReqMem')
+]
+
+
+def average(collection):
+ return sum(collection, 0.0) / max(len(collection), 1)
+
+
+CALC = [
+ ('min', min),
+ ('max', max),
+ ('avg', average)
+]
+
+ORDER = [
+ 'connections',
+ 'requests',
+ 'performance',
+ 'request_duration',
+ 'request_cpu',
+ 'request_mem',
+]
+
+CHARTS = {
+ 'connections': {
+ 'options': [None, 'PHP-FPM Active Connections', 'connections', 'active connections', 'phpfpm.connections',
+ 'line'],
+ 'lines': [
+ ['active'],
+ ['maxActive', 'max active'],
+ ['idle']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'PHP-FPM Requests', 'requests/s', 'requests', 'phpfpm.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'incremental']
+ ]
+ },
+ 'performance': {
+ 'options': [None, 'PHP-FPM Performance', 'status', 'performance', 'phpfpm.performance', 'line'],
+ 'lines': [
+ ['reached', 'max children reached'],
+ ['slow', 'slow requests']
+ ]
+ },
+ 'request_duration': {
+ 'options': [None, 'PHP-FPM Request Duration', 'milliseconds', 'request duration', 'phpfpm.request_duration',
+ 'line'],
+ 'lines': [
+ ['minReqDur', 'min', 'absolute', 1, 1000],
+ ['maxReqDur', 'max', 'absolute', 1, 1000],
+ ['avgReqDur', 'avg', 'absolute', 1, 1000]
+ ]
+ },
+ 'request_cpu': {
+ 'options': [None, 'PHP-FPM Request CPU', 'percentage', 'request CPU', 'phpfpm.request_cpu', 'line'],
+ 'lines': [
+ ['minReqCpu', 'min'],
+ ['maxReqCpu', 'max'],
+ ['avgReqCpu', 'avg']
+ ]
+ },
+ 'request_mem': {
+ 'options': [None, 'PHP-FPM Request Memory', 'KB', 'request memory', 'phpfpm.request_mem', 'line'],
+ 'lines': [
+ ['minReqMem', 'min', 'absolute', 1, 1024],
+ ['maxReqMem', 'max', 'absolute', 1, 1024],
+ ['avgReqMem', 'avg', 'absolute', 1, 1024]
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://localhost/status?full&json')
+ self.json = '&json' in self.url or '?json' in self.url
+ self.json_full = self.url.endswith(('?full&json', '?json&full'))
+ self.if_all_processes_running = dict(
+ [(c_name + p_name, 0) for c_name, func in CALC for metric, p_name in PER_PROCESS_INFO]
+ )
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ raw_json = parse_raw_data_(is_json=self.json, raw_data=raw)
+
+ # Per Pool info: active connections, requests and performance charts
+ to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
+
+ # Per Process Info: duration, cpu and memory charts (min, max, avg)
+ if self.json_full:
+ p_info = dict()
+ to_netdata.update(self.if_all_processes_running) # If all processes are in running state
+ # Metrics are always 0 if the process is not in Idle state because calculation is done
+ # when the request processing has terminated
+ for process in [p for p in raw_json['processes'] if p['state'] == 'Idle']:
+ p_info.update(fetch_data_(raw_data=process, metrics_list=PER_PROCESS_INFO, pid=str(process['pid'])))
+
+ if p_info:
+ for new_name in PER_PROCESS_INFO:
+ for name, func in CALC:
+ to_netdata[name + new_name[1]] = func([p_info[k] for k in p_info if new_name[1] in k])
+
+ return to_netdata or None
+
+
+def fetch_data_(raw_data, metrics_list, pid=''):
+ """
+ :param raw_data: dict
+ :param metrics_list: list
+ :param pid: str
+ :return: dict
+ """
+ result = dict()
+ for metric, new_name in metrics_list:
+ if metric in raw_data:
+ result[new_name + pid] = float(raw_data[metric])
+ return result
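+# Example (illustrative; assumes PER_PROCESS_INFO maps the status field
+# 'requestDuration' to the short name 'ReqDur'):
+# fetch_data_({'requestDuration': 500}, PER_PROCESS_INFO, pid='123')
+# would return {'ReqDur123': 500.0}.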
+
+
+def parse_raw_data_(is_json, raw_data):
+ """
+ :param is_json: bool
+ :param raw_data: str
+ :return: dict
+ """
+ if is_json:
+ try:
+ return json.loads(raw_data)
+ except ValueError:
+ return dict()
+ else:
+ raw_data = ' '.join(raw_data.split())
+ return dict(REGEX.findall(raw_data))
diff --git a/collectors/python.d.plugin/phpfpm/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf
new file mode 100644
index 0000000..d318539
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.conf
@@ -0,0 +1,88 @@
+# netdata python.d.plugin configuration for PHP-FPM
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, PHP-FPM also supports the following:
+#
+# url: 'URL' # the URL to fetch PHP-FPM's status stats
+# # Be sure to include ?full&json at the end of the url
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : "http://localhost/status?full&json"
+
+localipv4:
+ name : 'local'
+ url : "http://127.0.0.1/status?full&json"
+
+localipv6:
+ name : 'local'
+ url : "http://[::1]/status?full&json"
+
diff --git a/collectors/python.d.plugin/portcheck/Makefile.inc b/collectors/python.d.plugin/portcheck/Makefile.inc
new file mode 100644
index 0000000..76763f0
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += portcheck/portcheck.chart.py
+dist_pythonconfig_DATA += portcheck/portcheck.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += portcheck/README.md portcheck/Makefile.inc
+
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
new file mode 100644
index 0000000..8f289c8
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -0,0 +1,37 @@
+# portcheck
+
+Module monitors a remote TCP service.
+
+Following charts are drawn per host:
+
+1. **Latency** ms
+ * Time required to connect to a TCP port.
+ Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Could not create socket: possible DNS problems
+ * Connection refused: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+
+### configuration
+
+```yaml
+server:
+ host: 'dns or ip' # required
+ port: 22 # required
+ timeout: 1 # optional
+ update_every: 1 # optional
+```
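+
+Any number of jobs is supported; a sketch with hypothetical endpoints:
+
+```yaml
+web:
+ host: 'example.com'
+ port: 443
+
+ssh:
+ host: '192.0.2.10'
+ port: 22
+```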
+
+### notes
+
+ * The status chart is intended for alarms, badges, or access via the API.
+ * A system/service/firewall might block netdata's access if a portscan or
+ similar is detected.
+ * Currently, the latency measurement has low accuracy; use it only as a reference.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fportcheck%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/portcheck/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
new file mode 100644
index 0000000..8479e38
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+# Description: simple port check netdata python.d module
+# Original Author: ccremer (github.com/ccremer)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+PORT_LATENCY = 'connect'
+
+PORT_SUCCESS = 'success'
+PORT_TIMEOUT = 'timeout'
+PORT_FAILED = 'no_connection'
+
+ORDER = ['latency', 'status']
+
+CHARTS = {
+ 'latency': {
+ 'options': [None, 'TCP connect latency', 'milliseconds', 'latency', 'portcheck.latency', 'line'],
+ 'lines': [
+ [PORT_LATENCY, 'connect', 'absolute', 100, 1000]
+ ]
+ },
+ 'status': {
+ 'options': [None, 'Portcheck status', 'boolean', 'status', 'portcheck.status', 'line'],
+ 'lines': [
+ [PORT_SUCCESS, 'success', 'absolute'],
+ [PORT_TIMEOUT, 'timeout', 'absolute'],
+ [PORT_FAILED, 'no connection', 'absolute']
+ ]
+ }
+}
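+# Each 'lines' entry is [dimension_id, name, algorithm, multiplier, divisor];
+# e.g. the latency dimension stores round(diff * 10000) (0.1 ms units) and is
+# rendered as value * 100 / 1000, i.e. milliseconds.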
+
+
+# Not deriving from SocketService, too much is different
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host')
+ self.port = self.configuration.get('port')
+ self.timeout = self.configuration.get('timeout', 1)
+
+ def check(self):
+ """
+ Parse configuration, check if configuration is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ if self.host is None or self.port is None:
+ self.error('Host or port missing')
+ return False
+ if not isinstance(self.port, int):
+ self.error('"port" is not an integer. Specify a numerical value, not service name.')
+ return False
+
+ self.debug('Enabled portcheck: {host}:{port}, update every {update}s, timeout: {timeout}s'.format(
+ host=self.host, port=self.port, update=self.update_every, timeout=self.timeout
+ ))
+ # We will accept any (valid-ish) configuration, even if initial connection fails (a service might be down from
+ # the beginning)
+ return True
+
+ def _get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ data = dict()
+ data[PORT_SUCCESS] = 0
+ data[PORT_TIMEOUT] = 0
+ data[PORT_FAILED] = 0
+
+ success = False
+ try:
+ for socket_config in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
+ # use first working socket
+ sock = self._create_socket(socket_config)
+ if sock is not None:
+ self._connect2socket(data, socket_config, sock)
+ self._disconnect(sock)
+ success = True
+ break
+ except socket.gaierror as error:
+ self.debug('Failed to connect to "{host}:{port}", error: {error}'.format(
+ host=self.host, port=self.port, error=error
+ ))
+
+ # We could not connect
+ if not success:
+ data[PORT_FAILED] = 1
+
+ return data
+
+ def _create_socket(self, socket_config):
+ af, sock_type, proto, _, sa = socket_config
+ try:
+ self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ sock = socket.socket(af, sock_type, proto)
+ sock.settimeout(self.timeout)
+ return sock
+ except socket.error as error:
+ self.debug('Failed to create socket "{address}", port {port}, error: {error}'.format(
+ address=sa[0], port=sa[1], error=error
+ ))
+ return None
+
+ def _connect2socket(self, data, socket_config, sock):
+ """
+ Connect to a socket, passing the result of getaddrinfo()
+ :return: dict
+ """
+
+ _, _, _, _, sa = socket_config
+ port = str(sa[1])
+ try:
+ self.debug('Connecting socket to "{address}", port {port}'.format(address=sa[0], port=port))
+ start = time()
+ sock.connect(sa)
+ diff = time() - start
+ self.debug('Connected to "{address}", port {port}, latency {latency}'.format(
+ address=sa[0], port=port, latency=diff
+ ))
+ # clamp at 0; latency is stored in 0.1 ms units (0.0 would mean a failed connection, handy for 3rd-party APIs)
+ data[PORT_LATENCY] = max(round(diff * 10000), 0)
+ data[PORT_SUCCESS] = 1
+
+ except socket.timeout as error:
+ self.debug('Socket timed out on "{address}", port {port}, error: {error}'.format(
+ address=sa[0], port=port, error=error
+ ))
+ data[PORT_TIMEOUT] = 1
+
+ except socket.error as error:
+ self.debug('Failed to connect to "{address}", port {port}, error: {error}'.format(
+ address=sa[0], port=port, error=error
+ ))
+ data[PORT_FAILED] = 1
+
+ def _disconnect(self, sock):
+ """
+ Close socket connection
+ :return:
+ """
+ if sock is not None:
+ try:
+ self.debug('Closing socket')
+ sock.shutdown(2) # 0 - read, 1 - write, 2 - all
+ sock.close()
+ except socket.error:
+ pass
diff --git a/collectors/python.d.plugin/portcheck/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf
new file mode 100644
index 0000000..df67824
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/portcheck.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for portcheck
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to backends) and deleted one hour
+# later (configurable from netdata.conf).
+# -- For this plugin, cleanup MUST be disabled, otherwise we lose the latency chart
+chart_cleanup: 0
+
+# Autodetection and retries do not work for this plugin
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# -------------------------------
+# ATTENTION: Any valid configuration will be accepted, even if initial connection fails!
+# -------------------------------
+#
+# There is intentionally no default config for 'localhost'
+
+# job_name:
+# name: myname # [optional] the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # [optional] the JOB's data collection frequency
+# priority: 60000 # [optional] the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# timeout: 1 # [optional] the socket timeout when connecting
+# host: 'dns or ip' # [required] the remote host address in either IPv4, IPv6 or as DNS name.
+# port: 22 # [required] the port number to check. Specify an integer, not service name.
+
+# Note: a system/service/firewall might block netdata if it detects a portscan. The portcheck plugin is meant for simple use cases.
+# Currently, the latency measurement has low accuracy; use it only as a reference.
+
diff --git a/collectors/python.d.plugin/postfix/Makefile.inc b/collectors/python.d.plugin/postfix/Makefile.inc
new file mode 100644
index 0000000..f4091b2
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postfix/postfix.chart.py
+dist_pythonconfig_DATA += postfix/postfix.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
new file mode 100644
index 0000000..e2147ac
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -0,0 +1,17 @@
+# postfix
+
+Simple module executing `postqueue -p` to grab the postfix queue.
+
+It produces only two charts:
+
+1. **Postfix Queue Emails**
+ * emails
+
+2. **Postfix Queue Emails Size** in KiB
+ * size
+
+Configuration is not needed.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostfix%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/postfix/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
new file mode 100644
index 0000000..b650514
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Description: postfix netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+POSTQUEUE_COMMAND = 'postqueue -p'
+
+ORDER = [
+ 'qemails',
+ 'qsize',
+]
+
+CHARTS = {
+ 'qemails': {
+ 'options': [None, 'Postfix Queue Emails', 'emails', 'queue', 'postfix.qemails', 'line'],
+ 'lines': [
+ ['emails', None, 'absolute']
+ ]
+ },
+ 'qsize': {
+ 'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'],
+ 'lines': [
+ ['size', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = POSTQUEUE_COMMAND
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()[-1].split(' ')
+ if raw[0] == 'Mail' and raw[1] == 'queue':
+ return {'emails': 0,
+ 'size': 0}
+
+ return {'emails': raw[4],
+ 'size': raw[1]}
+ except (TypeError, IndexError, ValueError, AttributeError):
+ return None
diff --git a/collectors/python.d.plugin/postfix/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
new file mode 100644
index 0000000..a4d2472
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for postfix
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# postfix is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, postfix also supports the following:
+#
+# command: 'postqueue -p' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'postqueue -p'
diff --git a/collectors/python.d.plugin/postgres/Makefile.inc b/collectors/python.d.plugin/postgres/Makefile.inc
new file mode 100644
index 0000000..91a185c
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postgres/postgres.chart.py
+dist_pythonconfig_DATA += postgres/postgres.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postgres/README.md postgres/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
new file mode 100644
index 0000000..9939a0c
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -0,0 +1,70 @@
+# postgres
+
+Module monitors one or more postgres servers.
+
+**Requirements:**
+
+ * `python-psycopg2` package. You have to install it manually.
+
+Following charts are drawn:
+
+1. **Database size** MB
+ * size
+
+2. **Current Backend Processes** processes
+ * active
+
+3. **Write-Ahead Logging Statistics** files/s
+ * total
+ * ready
+ * done
+
+4. **Checkpoints** writes/s
+ * scheduled
+ * requested
+
+5. **Current connections to db** count
+ * connections
+
+6. **Tuples returned from db** tuples/s
+ * sequential
+ * bitmap
+
+7. **Tuple reads from db** reads/s
+ * disk
+ * cache
+
+8. **Transactions on db** transactions/s
+ * committed
+ * rolled back
+
+9. **Tuples written to db** writes/s
+ * inserted
+ * updated
+ * deleted
+ * conflicts
+
+10. **Locks on db** count per type
+ * locks
+
+### configuration
+
+```yaml
+socket:
+ name : 'socket'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'tcp'
+ user : 'postgres'
+ database : 'postgres'
+ host : 'localhost'
+ port : 5432
+```
+
+When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`.
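+
+Module-specific options documented in `postgres.conf` can be set per job; a
+sketch (database names are placeholders):
+
+```yaml
+tcp:
+ name : 'tcp'
+ user : 'postgres'
+ database : 'postgres'
+ host : 'localhost'
+ port : 5432
+ table_stats : false
+ index_stats : false
+ database_poll : 'dbase_name1 dbase_name2'
+```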
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpostgres%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
new file mode 100644
index 0000000..e988eec
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
@@ -0,0 +1,1092 @@
+# -*- coding: utf-8 -*-
+# Description: postgres netdata python.d module
+# Authors: facetoe, dangtranhoang
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from copy import deepcopy
+
+try:
+ import psycopg2
+ from psycopg2 import extensions
+ from psycopg2.extras import DictCursor
+ from psycopg2 import OperationalError
+ PSYCOPG2 = True
+except ImportError:
+ PSYCOPG2 = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+DEFAULT_PORT = 5432
+DEFAULT_USER = 'postgres'
+DEFAULT_CONNECT_TIMEOUT = 2 # seconds
+DEFAULT_STATEMENT_TIMEOUT = 5000 # ms
+
+
+WAL = 'WAL'
+ARCHIVE = 'ARCHIVE'
+BACKENDS = 'BACKENDS'
+TABLE_STATS = 'TABLE_STATS'
+INDEX_STATS = 'INDEX_STATS'
+DATABASE = 'DATABASE'
+BGWRITER = 'BGWRITER'
+LOCKS = 'LOCKS'
+DATABASES = 'DATABASES'
+STANDBY = 'STANDBY'
+REPLICATION_SLOT = 'REPLICATION_SLOT'
+STANDBY_DELTA = 'STANDBY_DELTA'
+REPSLOT_FILES = 'REPSLOT_FILES'
+IF_SUPERUSER = 'IF_SUPERUSER'
+SERVER_VERSION = 'SERVER_VERSION'
+AUTOVACUUM = 'AUTOVACUUM'
+DIFF_LSN = 'DIFF_LSN'
+WAL_WRITES = 'WAL_WRITES'
+
+METRICS = {
+ DATABASE: [
+ 'connections',
+ 'xact_commit',
+ 'xact_rollback',
+ 'blks_read',
+ 'blks_hit',
+ 'tup_returned',
+ 'tup_fetched',
+ 'tup_inserted',
+ 'tup_updated',
+ 'tup_deleted',
+ 'conflicts',
+ 'temp_files',
+ 'temp_bytes',
+ 'size'
+ ],
+ BACKENDS: [
+ 'backends_active',
+ 'backends_idle'
+ ],
+ INDEX_STATS: [
+ 'index_count',
+ 'index_size'
+ ],
+ TABLE_STATS: [
+ 'table_size',
+ 'table_count'
+ ],
+ WAL: [
+ 'written_wal',
+ 'recycled_wal',
+ 'total_wal'
+ ],
+ WAL_WRITES: [
+ 'wal_writes'
+ ],
+ ARCHIVE: [
+ 'ready_count',
+ 'done_count',
+ 'file_count'
+ ],
+ BGWRITER: [
+ 'checkpoint_scheduled',
+ 'checkpoint_requested',
+ 'buffers_checkpoint',
+ 'buffers_clean',
+ 'maxwritten_clean',
+ 'buffers_backend',
+ 'buffers_alloc',
+ 'buffers_backend_fsync'
+ ],
+ LOCKS: [
+ 'ExclusiveLock',
+ 'RowShareLock',
+ 'SIReadLock',
+ 'ShareUpdateExclusiveLock',
+ 'AccessExclusiveLock',
+ 'AccessShareLock',
+ 'ShareRowExclusiveLock',
+ 'ShareLock',
+ 'RowExclusiveLock'
+ ],
+ AUTOVACUUM: [
+ 'analyze',
+ 'vacuum_analyze',
+ 'vacuum',
+ 'vacuum_freeze',
+ 'brin_summarize'
+ ],
+ STANDBY_DELTA: [
+ 'sent_delta',
+ 'write_delta',
+ 'flush_delta',
+ 'replay_delta'
+ ],
+ REPSLOT_FILES: [
+ 'replslot_wal_keep',
+ 'replslot_files'
+ ]
+}
+
+NO_VERSION = 0
+DEFAULT = 'DEFAULT'
+V96 = 'V96'
+V10 = 'V10'
+V11 = 'V11'
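+# Version thresholds below compare against server_version_num,
+# e.g. 90605 means 9.6.5 and 100003 means 10.3.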
+
+
+QUERY_WAL = {
+ DEFAULT: """
+SELECT
+ count(*) as total_wal,
+ count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+ count(*) FILTER (WHERE type = 'written') AS written_wal
+FROM
+ (SELECT
+ wal.name,
+ pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END ),
+ CASE
+ WHEN wal.name > pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END ) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_wal') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY
+ (pg_stat_file('pg_wal/'||name)).modification,
+ wal.name DESC) sub;
+""",
+ V96: """
+SELECT
+ count(*) as total_wal,
+ count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+ count(*) FILTER (WHERE type = 'written') AS written_wal
+FROM
+ (SELECT
+ wal.name,
+ pg_xlogfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_xlog_location()
+ END ),
+ CASE
+ WHEN wal.name > pg_xlogfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_xlog_location()
+ END ) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_xlog') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY
+ (pg_stat_file('pg_xlog/'||name)).modification,
+ wal.name DESC) sub;
+""",
+}
+
+QUERY_ARCHIVE = {
+ DEFAULT: """
+SELECT
+ CAST(COUNT(*) AS INT) AS file_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
+FROM
+ pg_catalog.pg_ls_dir('pg_wal/archive_status') AS archive_files (archive_file);
+""",
+ V96: """
+SELECT
+ CAST(COUNT(*) AS INT) AS file_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),0) AS INT) AS ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),0) AS INT) AS done_count
+FROM
+ pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
+
+""",
+}
+
+QUERY_BACKEND = {
+ DEFAULT: """
+SELECT
+ count(*) - (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_active,
+ (SELECT count(*)
+ FROM pg_stat_activity
+ WHERE state = 'idle')
+ AS backends_idle
+FROM pg_stat_activity;
+""",
+}
+
+QUERY_TABLE_STATS = {
+ DEFAULT: """
+SELECT
+ ((sum(relpages) * 8) * 1024) AS table_size,
+ count(1) AS table_count
+FROM pg_class
+WHERE relkind IN ('r', 't');
+""",
+}
+
+QUERY_INDEX_STATS = {
+ DEFAULT: """
+SELECT
+ ((sum(relpages) * 8) * 1024) AS index_size,
+ count(1) AS index_count
+FROM pg_class
+WHERE relkind = 'i';
+""",
+}
+
+QUERY_DATABASE = {
+ DEFAULT: """
+SELECT
+ datname AS database_name,
+ numbackends AS connections,
+ xact_commit AS xact_commit,
+ xact_rollback AS xact_rollback,
+ blks_read AS blks_read,
+ blks_hit AS blks_hit,
+ tup_returned AS tup_returned,
+ tup_fetched AS tup_fetched,
+ tup_inserted AS tup_inserted,
+ tup_updated AS tup_updated,
+ tup_deleted AS tup_deleted,
+ conflicts AS conflicts,
+ pg_database_size(datname) AS size,
+ temp_files AS temp_files,
+ temp_bytes AS temp_bytes
+FROM pg_stat_database
+WHERE datname IN %(databases)s ;
+""",
+}
+
+QUERY_BGWRITER = {
+ DEFAULT: """
+SELECT
+ checkpoints_timed AS checkpoint_scheduled,
+ checkpoints_req AS checkpoint_requested,
+ buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint,
+ buffers_clean * current_setting('block_size')::numeric buffers_clean,
+ maxwritten_clean,
+ buffers_backend * current_setting('block_size')::numeric buffers_backend,
+ buffers_alloc * current_setting('block_size')::numeric buffers_alloc,
+ buffers_backend_fsync
+FROM pg_stat_bgwriter;
+""",
+}
+
+QUERY_LOCKS = {
+ DEFAULT: """
+SELECT
+ pg_database.datname as database_name,
+ mode,
+ count(mode) AS locks_count
+FROM pg_locks
+INNER JOIN pg_database
+ ON pg_database.oid = pg_locks.database
+GROUP BY datname, mode
+ORDER BY datname, mode;
+""",
+}
+
+QUERY_DATABASES = {
+ DEFAULT: """
+SELECT
+ datname
+FROM pg_stat_database
+WHERE
+ has_database_privilege(
+ (SELECT current_user), datname, 'connect')
+ AND NOT datname ~* '^template\d';
+""",
+}
+
+QUERY_STANDBY = {
+ DEFAULT: """
+SELECT
+ application_name
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL
+GROUP BY application_name;
+""",
+}
+
+QUERY_REPLICATION_SLOT = {
+ DEFAULT: """
+SELECT slot_name
+FROM pg_replication_slots;
+"""
+}
+
+QUERY_STANDBY_DELTA = {
+ DEFAULT: """
+SELECT
+ application_name,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ sent_lsn) AS sent_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ write_lsn) AS write_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ flush_lsn) AS flush_delta,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ replay_lsn) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
+""",
+ V96: """
+SELECT
+ application_name,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ sent_location) AS sent_delta,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ write_location) AS write_delta,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ flush_location) AS flush_delta,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ replay_location) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
+""",
+}
+
+QUERY_REPSLOT_FILES = {
+ DEFAULT: """
+WITH wal_size AS (
+ SELECT
+ setting::int AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn)
+ - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+""",
+ V10: """
+WITH wal_size AS (
+ SELECT
+ current_setting('wal_block_size')::INT * setting::INT AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn)
+ - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+""",
+}
+
+QUERY_SUPERUSER = {
+ DEFAULT: """
+SELECT current_setting('is_superuser') = 'on' AS is_superuser;
+""",
+}
+
+QUERY_SHOW_VERSION = {
+ DEFAULT: """
+SHOW server_version_num;
+""",
+}
+
+QUERY_AUTOVACUUM = {
+ DEFAULT: """
+SELECT
+ count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%'
+ AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%'
+ AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum,
+ count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze,
+ count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize
+FROM pg_stat_activity
+WHERE query NOT LIKE '%%pg_stat_activity%%';
+""",
+}
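+# '%%' escapes a literal '%' because queries are executed with psycopg2
+# parameters (see query_stats), where a bare '%' is the placeholder marker.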
+
+QUERY_DIFF_LSN = {
+ DEFAULT: """
+SELECT
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ '0/0') as wal_writes ;
+""",
+ V96: """
+SELECT
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ '0/0') as wal_writes ;
+""",
+}
+
+
+def query_factory(name, version=NO_VERSION):
+ if name == BACKENDS:
+ return QUERY_BACKEND[DEFAULT]
+ elif name == TABLE_STATS:
+ return QUERY_TABLE_STATS[DEFAULT]
+ elif name == INDEX_STATS:
+ return QUERY_INDEX_STATS[DEFAULT]
+ elif name == DATABASE:
+ return QUERY_DATABASE[DEFAULT]
+ elif name == BGWRITER:
+ return QUERY_BGWRITER[DEFAULT]
+ elif name == LOCKS:
+ return QUERY_LOCKS[DEFAULT]
+ elif name == DATABASES:
+ return QUERY_DATABASES[DEFAULT]
+ elif name == STANDBY:
+ return QUERY_STANDBY[DEFAULT]
+ elif name == REPLICATION_SLOT:
+ return QUERY_REPLICATION_SLOT[DEFAULT]
+ elif name == IF_SUPERUSER:
+ return QUERY_SUPERUSER[DEFAULT]
+ elif name == SERVER_VERSION:
+ return QUERY_SHOW_VERSION[DEFAULT]
+ elif name == AUTOVACUUM:
+ return QUERY_AUTOVACUUM[DEFAULT]
+ elif name == WAL:
+ if version < 100000:
+ return QUERY_WAL[V96]
+ return QUERY_WAL[DEFAULT]
+ elif name == ARCHIVE:
+ if version < 100000:
+ return QUERY_ARCHIVE[V96]
+ return QUERY_ARCHIVE[DEFAULT]
+ elif name == STANDBY_DELTA:
+ if version < 100000:
+ return QUERY_STANDBY_DELTA[V96]
+ return QUERY_STANDBY_DELTA[DEFAULT]
+ elif name == REPSLOT_FILES:
+ if version < 110000:
+ return QUERY_REPSLOT_FILES[V10]
+ return QUERY_REPSLOT_FILES[DEFAULT]
+ elif name == DIFF_LSN:
+ if version < 100000:
+ return QUERY_DIFF_LSN[V96]
+ return QUERY_DIFF_LSN[DEFAULT]
+
+ raise ValueError('unknown query')
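+# e.g. query_factory(WAL, 90605) selects the 9.6 (pg_xlog) variant, while
+# query_factory(WAL, 100003) selects the default (pg_wal) variant.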
+
+
+ORDER = [
+ 'db_stat_temp_files',
+ 'db_stat_temp_bytes',
+ 'db_stat_blks',
+ 'db_stat_tuple_returned',
+ 'db_stat_tuple_write',
+ 'db_stat_transactions',
+ 'db_stat_connections',
+ 'database_size',
+ 'backend_process',
+ 'index_count',
+ 'index_size',
+ 'table_count',
+ 'table_size',
+ 'wal',
+ 'wal_writes',
+ 'archive_wal',
+ 'checkpointer',
+ 'stat_bgwriter_alloc',
+ 'stat_bgwriter_checkpoint',
+ 'stat_bgwriter_backend',
+ 'stat_bgwriter_backend_fsync',
+ 'stat_bgwriter_bgwriter',
+ 'stat_bgwriter_maxwritten',
+ 'replication_slot',
+ 'standby_delta',
+ 'autovacuum'
+]
+
+CHARTS = {
+ 'db_stat_transactions': {
+ 'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions',
+ 'line'],
+ 'lines': [
+ ['xact_commit', 'committed', 'incremental'],
+ ['xact_rollback', 'rolled back', 'incremental']
+ ]
+ },
+ 'db_stat_connections': {
+ 'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections',
+ 'line'],
+ 'lines': [
+ ['connections', 'connections', 'absolute']
+ ]
+ },
+ 'db_stat_blks': {
+ 'options': [None, 'Disk blocks read from db', 'reads/s', 'db statistics', 'postgres.db_stat_blks', 'line'],
+ 'lines': [
+ ['blks_read', 'disk', 'incremental'],
+ ['blks_hit', 'cache', 'incremental']
+ ]
+ },
+ 'db_stat_tuple_returned': {
+ 'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned',
+ 'line'],
+ 'lines': [
+ ['tup_returned', 'sequential', 'incremental'],
+ ['tup_fetched', 'bitmap', 'incremental']
+ ]
+ },
+ 'db_stat_tuple_write': {
+ 'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'],
+ 'lines': [
+ ['tup_inserted', 'inserted', 'incremental'],
+ ['tup_updated', 'updated', 'incremental'],
+ ['tup_deleted', 'deleted', 'incremental'],
+ ['conflicts', 'conflicts', 'incremental']
+ ]
+ },
+ 'db_stat_temp_bytes': {
+ 'options': [None, 'Temp files written to disk', 'KiB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
+ 'line'],
+ 'lines': [
+ ['temp_bytes', 'size', 'incremental', 1, 1024]
+ ]
+ },
+ 'db_stat_temp_files': {
+ 'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files',
+ 'line'],
+ 'lines': [
+ ['temp_files', 'files', 'incremental']
+ ]
+ },
+ 'database_size': {
+ 'options': [None, 'Database size', 'MiB', 'database size', 'postgres.db_size', 'stacked'],
+ 'lines': [
+ ]
+ },
+ 'backend_process': {
+ 'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process',
+ 'line'],
+ 'lines': [
+ ['backends_active', 'active', 'absolute'],
+ ['backends_idle', 'idle', 'absolute']
+ ]
+ },
+ 'index_count': {
+ 'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
+ 'lines': [
+ ['index_count', 'total', 'absolute']
+ ]
+ },
+ 'index_size': {
+ 'options': [None, 'Indexes size', 'MiB', 'indexes', 'postgres.index_size', 'line'],
+ 'lines': [
+ ['index_size', 'size', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'table_count': {
+ 'options': [None, 'Total Tables', 'tables', 'tables', 'postgres.table_count', 'line'],
+ 'lines': [
+ ['table_count', 'total', 'absolute']
+ ]
+ },
+ 'table_size': {
+ 'options': [None, 'Tables size', 'MiB', 'tables', 'postgres.table_size', 'line'],
+ 'lines': [
+ ['table_size', 'size', 'absolute', 1, 1024 * 1024]
+ ]
+ },
+ 'wal': {
+ 'options': [None, 'Write-Ahead Logs', 'files', 'wal', 'postgres.wal', 'line'],
+ 'lines': [
+ ['written_wal', 'written', 'absolute'],
+ ['recycled_wal', 'recycled', 'absolute'],
+ ['total_wal', 'total', 'absolute']
+ ]
+ },
+ 'wal_writes': {
+ 'options': [None, 'Write-Ahead Logs', 'KiB/s', 'wal_writes', 'postgres.wal_writes', 'line'],
+ 'lines': [
+ ['wal_writes', 'writes', 'incremental', 1, 1024]
+ ]
+ },
+ 'archive_wal': {
+ 'options': [None, 'Archive Write-Ahead Logs', 'files/s', 'archive wal', 'postgres.archive_wal', 'line'],
+ 'lines': [
+ ['file_count', 'total', 'incremental'],
+ ['ready_count', 'ready', 'incremental'],
+ ['done_count', 'done', 'incremental']
+ ]
+ },
+ 'checkpointer': {
+ 'options': [None, 'Checkpoints', 'writes/s', 'checkpointer', 'postgres.checkpointer', 'line'],
+ 'lines': [
+ ['checkpoint_scheduled', 'scheduled', 'incremental'],
+ ['checkpoint_requested', 'requested', 'incremental']
+ ]
+ },
+ 'stat_bgwriter_alloc': {
+ 'options': [None, 'Buffers allocated', 'KiB/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
+ 'lines': [
+ ['buffers_alloc', 'alloc', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_checkpoint': {
+ 'options': [None, 'Buffers written during checkpoints', 'KiB/s', 'bgwriter',
+ 'postgres.stat_bgwriter_checkpoint', 'line'],
+ 'lines': [
+ ['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_backend': {
+ 'options': [None, 'Buffers written directly by a backend', 'KiB/s', 'bgwriter',
+ 'postgres.stat_bgwriter_backend', 'line'],
+ 'lines': [
+ ['buffers_backend', 'backend', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_backend_fsync': {
+ 'options': [None, 'Fsync by backend', 'times', 'bgwriter', 'postgres.stat_bgwriter_backend_fsync', 'line'],
+ 'lines': [
+ ['buffers_backend_fsync', 'backend fsync', 'incremental']
+ ]
+ },
+ 'stat_bgwriter_bgwriter': {
+ 'options': [None, 'Buffers written by the background writer', 'KiB/s', 'bgwriter',
+ 'postgres.bgwriter_bgwriter', 'line'],
+ 'lines': [
+ ['buffers_clean', 'clean', 'incremental', 1, 1024]
+ ]
+ },
+ 'stat_bgwriter_maxwritten': {
+ 'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten',
+ 'line'],
+ 'lines': [
+ ['maxwritten_clean', 'maxwritten', 'incremental']
+ ]
+ },
+ 'autovacuum': {
+ 'options': [None, 'Autovacuum workers', 'workers', 'autovacuum', 'postgres.autovacuum', 'line'],
+ 'lines': [
+ ['analyze', 'analyze', 'absolute'],
+ ['vacuum', 'vacuum', 'absolute'],
+ ['vacuum_analyze', 'vacuum analyze', 'absolute'],
+ ['vacuum_freeze', 'vacuum freeze', 'absolute'],
+ ['brin_summarize', 'brin summarize', 'absolute']
+ ]
+ },
+ 'standby_delta': {
+ 'options': [None, 'Standby delta', 'KiB', 'replication delta', 'postgres.standby_delta', 'line'],
+ 'lines': [
+ ['sent_delta', 'sent delta', 'absolute', 1, 1024],
+ ['write_delta', 'write delta', 'absolute', 1, 1024],
+ ['flush_delta', 'flush delta', 'absolute', 1, 1024],
+ ['replay_delta', 'replay delta', 'absolute', 1, 1024]
+ ]
+ },
+ 'replication_slot': {
+ 'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'],
+ 'lines': [
+ ['replslot_wal_keep', 'wal kept', 'absolute'],
+ ['replslot_files', 'pg_replslot files', 'absolute']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = deepcopy(CHARTS)
+ self.do_table_stats = configuration.pop('table_stats', False)
+ self.do_index_stats = configuration.pop('index_stats', False)
+ self.databases_to_poll = configuration.pop('database_poll', None)
+ self.statement_timeout = configuration.pop('statement_timeout', DEFAULT_STATEMENT_TIMEOUT)
+ self.configuration = configuration
+ self.conn = None
+ self.server_version = None
+ self.is_superuser = False
+ self.alive = False
+ self.databases = list()
+ self.secondaries = list()
+ self.replication_slots = list()
+ self.queries = dict()
+ self.data = dict()
+
+ def reconnect(self):
+ return self.connect()
+
+ def connect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+
+ try:
+ params = dict(
+ host=None,
+ port=DEFAULT_PORT,
+ database=None,
+ user=DEFAULT_USER,
+ password=None,
+ connect_timeout=DEFAULT_CONNECT_TIMEOUT,
+ options='-c statement_timeout={0}'.format(self.statement_timeout),
+ )
+ params.update(self.configuration)
+
+ self.conn = psycopg2.connect(**params)
+ self.conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ self.conn.set_session(readonly=True)
+ except OperationalError as error:
+ self.error(error)
+ self.alive = False
+ else:
+ self.alive = True
+
+ return self.alive
+
+ def check(self):
+ if not PSYCOPG2:
+ self.error("'python-psycopg2' package is needed to use postgres module")
+ return False
+
+ if not self.connect():
+ self.error('failed to connect to {0}'.format(hide_password(self.configuration)))
+ return False
+
+ try:
+ self.check_queries()
+ except Exception as error:
+ self.error(error)
+ return False
+
+ self.populate_queries()
+ self.create_dynamic_charts()
+
+ return True
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ try:
+ cursor = self.conn.cursor(cursor_factory=DictCursor)
+
+ self.data.update(zero_lock_types(self.databases))
+
+ for query, metrics in self.queries.items():
+ self.query_stats(cursor, query, metrics)
+
+ except OperationalError:
+ self.alive = False
+ return None
+
+ cursor.close()
+
+ return self.data
+
+ def query_stats(self, cursor, query, metrics):
+ cursor.execute(query, dict(databases=tuple(self.databases)))
+
+ for row in cursor:
+ for metric in metrics:
+ # databases
+ if 'database_name' in row:
+ dimension_id = '_'.join([row['database_name'], metric])
+ # secondaries
+ elif 'application_name' in row:
+ dimension_id = '_'.join([row['application_name'], metric])
+ # replication slots
+ elif 'slot_name' in row:
+ dimension_id = '_'.join([row['slot_name'], metric])
+ # other
+ else:
+ dimension_id = metric
+
+ if metric in row:
+ if row[metric] is not None:
+ self.data[dimension_id] = int(row[metric])
+ elif 'locks_count' in row:
+ if metric == row['mode']:
+ self.data[dimension_id] = row['locks_count']
+
+ def check_queries(self):
+ cursor = self.conn.cursor()
+
+ self.server_version = detect_server_version(cursor, query_factory(SERVER_VERSION))
+ self.debug('server version: {0}'.format(self.server_version))
+
+ self.is_superuser = check_if_superuser(cursor, query_factory(IF_SUPERUSER))
+ self.debug('superuser: {0}'.format(self.is_superuser))
+
+ self.databases = discover(cursor, query_factory(DATABASES))
+ self.debug('discovered databases {0}'.format(self.databases))
+ if self.databases_to_poll:
+ to_poll = self.databases_to_poll.split()
+ self.databases = [db for db in self.databases if db in to_poll] or self.databases
+
+ self.secondaries = discover(cursor, query_factory(STANDBY))
+ self.debug('discovered secondaries: {0}'.format(self.secondaries))
+
+ if self.server_version >= 90400:
+ self.replication_slots = discover(cursor, query_factory(REPLICATION_SLOT))
+ self.debug('discovered replication slots: {0}'.format(self.replication_slots))
+
+ cursor.close()
+
+ def populate_queries(self):
+ self.queries[query_factory(DATABASE)] = METRICS[DATABASE]
+ self.queries[query_factory(BACKENDS)] = METRICS[BACKENDS]
+ self.queries[query_factory(LOCKS)] = METRICS[LOCKS]
+ self.queries[query_factory(BGWRITER)] = METRICS[BGWRITER]
+ self.queries[query_factory(DIFF_LSN, self.server_version)] = METRICS[WAL_WRITES]
+ self.queries[query_factory(STANDBY_DELTA, self.server_version)] = METRICS[STANDBY_DELTA]
+
+ if self.do_index_stats:
+ self.queries[query_factory(INDEX_STATS)] = METRICS[INDEX_STATS]
+ if self.do_table_stats:
+ self.queries[query_factory(TABLE_STATS)] = METRICS[TABLE_STATS]
+
+ if self.is_superuser:
+ self.queries[query_factory(ARCHIVE, self.server_version)] = METRICS[ARCHIVE]
+
+ if self.server_version >= 90400:
+ self.queries[query_factory(WAL, self.server_version)] = METRICS[WAL]
+
+ if self.server_version >= 100000:
+ self.queries[query_factory(REPSLOT_FILES, self.server_version)] = METRICS[REPSLOT_FILES]
+
+ if self.server_version >= 90400:
+ self.queries[query_factory(AUTOVACUUM)] = METRICS[AUTOVACUUM]
+
+ def create_dynamic_charts(self):
+ for database_name in self.databases[::-1]:
+ dim = [
+ database_name + '_size',
+ database_name,
+ 'absolute',
+ 1,
+ 1024 * 1024,
+ ]
+ self.definitions['database_size']['lines'].append(dim)
+ for chart_name in [name for name in self.order if name.startswith('db_stat')]:
+ add_database_stat_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name=chart_name,
+ database_name=database_name,
+ )
+ add_database_lock_chart(
+ order=self.order,
+ definitions=self.definitions,
+ database_name=database_name,
+ )
+
+ for application_name in self.secondaries[::-1]:
+ add_replication_delta_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name='standby_delta',
+ application_name=application_name,
+ )
+
+ for slot_name in self.replication_slots[::-1]:
+ add_replication_slot_chart(
+ order=self.order,
+ definitions=self.definitions,
+ name='replication_slot',
+ slot_name=slot_name,
+ )
+
+
+def discover(cursor, query):
+ cursor.execute(query)
+ result = list()
+ for v in [value[0] for value in cursor]:
+ if v not in result:
+ result.append(v)
+ return result
+
+
+def check_if_superuser(cursor, query):
+ cursor.execute(query)
+ return cursor.fetchone()[0]
+
+
+def detect_server_version(cursor, query):
+ cursor.execute(query)
+ return int(cursor.fetchone()[0])
+
+
+def zero_lock_types(databases):
+ result = dict()
+ for database in databases:
+ for lock_type in METRICS['LOCKS']:
+ key = '_'.join([database, lock_type])
+ result[key] = 0
+
+ return result
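+# e.g. zero_lock_types(['db1']) pre-populates {'db1_ExclusiveLock': 0, ...}
+# so lock charts show 0 for lock types absent from pg_locks this cycle.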
+
+
+def hide_password(config):
+ return dict((k, v if k != 'password' else '*****') for k, v in config.items())
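+# e.g. hide_password({'user': 'netdata', 'password': 'secret'})
+# -> {'user': 'netdata', 'password': '*****'}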
+
+
+def add_database_lock_chart(order, definitions, database_name):
+ def create_lines(database):
+ result = list()
+ for lock_type in METRICS['LOCKS']:
+ dimension_id = '_'.join([database, lock_type])
+ result.append([dimension_id, lock_type, 'absolute'])
+ return result
+
+ chart_name = database_name + '_locks'
+ order.insert(-1, chart_name)
+ definitions[chart_name] = {
+ 'options':
+ [None, 'Locks on db: ' + database_name, 'locks', 'db ' + database_name, 'postgres.db_locks', 'line'],
+ 'lines': create_lines(database_name)
+ }
+
+
+def add_database_stat_chart(order, definitions, name, database_name):
+ def create_lines(database, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([database, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([database_name, name])
+ order.insert(0, chart_name)
+ name, title, units, _, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type],
+ 'lines': create_lines(database_name, chart_template['lines'])}
+
+
+def add_replication_delta_chart(order, definitions, name, application_name):
+ def create_lines(standby, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([standby, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([application_name, name])
+ position = order.index('database_size')
+ order.insert(position, chart_name)
+ name, title, units, _, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type],
+ 'lines': create_lines(application_name, chart_template['lines'])}
+
+
+def add_replication_slot_chart(order, definitions, name, slot_name):
+ def create_lines(slot, lines):
+ result = list()
+ for line in lines:
+ new_line = ['_'.join([slot, line[0]])] + line[1:]
+ result.append(new_line)
+ return result
+
+ chart_template = CHARTS[name]
+ chart_name = '_'.join([slot_name, name])
+ position = order.index('database_size')
+ order.insert(position, chart_name)
+ name, title, units, _, context, chart_type = chart_template['options']
+ definitions[chart_name] = {
+ 'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type],
+ 'lines': create_lines(slot_name, chart_template['lines'])}
diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf
new file mode 100644
index 0000000..cde698f
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/postgres.conf
@@ -0,0 +1,124 @@
+# netdata python.d.plugin configuration for postgresql
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# A single connection is required in order to pull statistics.
+#
+# Connections can be configured with the following options:
+#
+# database : 'example_db_name'
+# user : 'example_user'
+# password : 'example_pass'
+# host : 'localhost'
+# port : 5432
+# connect_timeout : 2 # in seconds, default is 2
+# statement_timeout : 2000 # in ms, default is 2000
+#
+# Additionally, the following options allow selective disabling of charts
+#
+# table_stats : false
+# index_stats : false
+# database_poll : 'dbase_name1 dbase_name2' # poll only specified databases (all other will be excluded from charts)
+#
+# Postgres permissions are configured at its pg_hba.conf file. You can
+# "trust" local clients to allow netdata to connect, or you can create
+# a postgres user for netdata and add its password below to allow
+# netdata connect.
+#
+# Supported Postgres versions are:
+# - 9.3 (without autovacuum)
+# - 9.4
+# - 9.5
+# - 9.6
+# - 10
+# - 11
+#
+# Superuser access is needed for these charts:
+# Write-Ahead Logs
+# Archive Write-Ahead Logs
+#
+# Autovacuum charts are available since Postgres 9.4
+# ----------------------------------------------------------------------
+
+socket:
+ name : 'local'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'local'
+ database : 'postgres'
+ user : 'postgres'
+ host : 'localhost'
+ port : 5432
+
+tcpipv4:
+ name : 'local'
+ database : 'postgres'
+ user : 'postgres'
+ host : '127.0.0.1'
+ port : 5432
+
+tcpipv6:
+ name : 'local'
+ database : 'postgres'
+ user : 'postgres'
+ host : '::1'
+ port : 5432
+
diff --git a/collectors/python.d.plugin/powerdns/Makefile.inc b/collectors/python.d.plugin/powerdns/Makefile.inc
new file mode 100644
index 0000000..256d32a
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += powerdns/powerdns.chart.py
+dist_pythonconfig_DATA += powerdns/powerdns.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += powerdns/README.md powerdns/Makefile.inc
+
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
new file mode 100644
index 0000000..61aa5f6
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -0,0 +1,79 @@
+# powerdns
+
+Module monitors PowerDNS performance and health metrics.
+
+PowerDNS charts:
+
+1. **Queries and Answers**
+ * udp-queries
+ * udp-answers
+ * tcp-queries
+ * tcp-answers
+
+2. **Cache Usage**
+ * query-cache-hit
+ * query-cache-miss
+ * packetcache-hit
+ * packetcache-miss
+
+3. **Cache Size**
+ * query-cache-size
+ * packetcache-size
+ * key-cache-size
+ * meta-cache-size
+
+4. **Latency**
+ * latency
+
+PowerDNS Recursor charts:
+
+1. **Questions In**
+ * questions
+ * ipv6-questions
+ * tcp-questions
+
+2. **Questions Out**
+ * all-outqueries
+ * ipv6-outqueries
+ * tcp-outqueries
+ * throttled-outqueries
+
+3. **Answer Times**
+ * answers-slow
+ * answers0-1
+ * answers1-10
+ * answers10-100
+ * answers100-1000
+
+4. **Timeouts**
+ * outgoing-timeouts
+ * outgoing4-timeouts
+ * outgoing6-timeouts
+
+5. **Drops**
+ * over-capacity-drops
+
+6. **Cache Usage**
+ * cache-hits
+ * cache-misses
+ * packetcache-hits
+ * packetcache-misses
+
+7. **Cache Size**
+ * cache-entries
+ * packetcache-entries
+ * negcache-entries
+
+### configuration
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+ header :
+ X-API-Key: 'change_me'
+```
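+
+For reference, the statistics endpoint can also be probed by hand. The sketch
+below (Python 3, using the URL and API key from the example above) performs
+the same flattening of the returned JSON that the module does internally:
+
+```python
+import json
+from urllib.request import Request, urlopen
+
+URL = 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+req = Request(URL, headers={'X-API-Key': 'change_me'})
+raw = urlopen(req).read().decode('utf-8')
+
+# The API returns a JSON array of {"name": ..., "value": ...} objects;
+# flatten it into a single dict keyed by metric name.
+metrics = dict((m['name'], m['value']) for m in json.loads(raw))
+print(metrics.get('udp-queries'))
+```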
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpowerdns%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/powerdns/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
new file mode 100644
index 0000000..7ed1554
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+# Description: powerdns netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# Author: Luke Whitworth
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'questions',
+ 'cache_usage',
+ 'cache_size',
+ 'latency',
+]
+
+CHARTS = {
+ 'questions': {
+ 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
+ 'lines': [
+ ['udp-queries', None, 'incremental'],
+ ['udp-answers', None, 'incremental'],
+ ['tcp-queries', None, 'incremental'],
+ ['tcp-answers', None, 'incremental']
+ ]
+ },
+ 'cache_usage': {
+ 'options': [None, 'PowerDNS Cache Usage', 'count', 'cache', 'powerdns.cache_usage', 'line'],
+ 'lines': [
+ ['query-cache-hit', None, 'incremental'],
+ ['query-cache-miss', None, 'incremental'],
+ ['packetcache-hit', 'packet-cache-hit', 'incremental'],
+ ['packetcache-miss', 'packet-cache-miss', 'incremental']
+ ]
+ },
+ 'cache_size': {
+ 'options': [None, 'PowerDNS Cache Size', 'count', 'cache', 'powerdns.cache_size', 'line'],
+ 'lines': [
+ ['query-cache-size', None, 'absolute'],
+ ['packetcache-size', 'packet-cache-size', 'absolute'],
+ ['key-cache-size', None, 'absolute'],
+ ['meta-cache-size', None, 'absolute']
+ ]
+ },
+ 'latency': {
+ 'options': [None, 'PowerDNS Latency', 'microseconds', 'latency', 'powerdns.latency', 'line'],
+ 'lines': [
+ ['latency', None, 'absolute']
+ ]
+ }
+}
+
+RECURSOR_ORDER = ['questions-in', 'questions-out', 'answer-times', 'timeouts', 'drops', 'cache_usage', 'cache_size']
+
+RECURSOR_CHARTS = {
+ 'questions-in': {
+ 'options': [None, 'PowerDNS Recursor Questions In', 'count', 'questions', 'powerdns_recursor.questions-in',
+ 'line'],
+ 'lines': [
+ ['questions', None, 'incremental'],
+ ['ipv6-questions', None, 'incremental'],
+ ['tcp-questions', None, 'incremental']
+ ]
+ },
+ 'questions-out': {
+ 'options': [None, 'PowerDNS Recursor Questions Out', 'count', 'questions', 'powerdns_recursor.questions-out',
+ 'line'],
+ 'lines': [
+ ['all-outqueries', None, 'incremental'],
+ ['ipv6-outqueries', None, 'incremental'],
+ ['tcp-outqueries', None, 'incremental'],
+ ['throttled-outqueries', None, 'incremental']
+ ]
+ },
+ 'answer-times': {
+ 'options': [None, 'PowerDNS Recursor Answer Times', 'count', 'performance', 'powerdns_recursor.answer-times',
+ 'line'],
+ 'lines': [
+ ['answers-slow', None, 'incremental'],
+ ['answers0-1', None, 'incremental'],
+ ['answers1-10', None, 'incremental'],
+ ['answers10-100', None, 'incremental'],
+ ['answers100-1000', None, 'incremental']
+ ]
+ },
+ 'timeouts': {
+ 'options': [None, 'PowerDNS Recursor Timeouts', 'count', 'performance', 'powerdns_recursor.timeouts',
+ 'line'],
+ 'lines': [
+ ['outgoing-timeouts', None, 'incremental'],
+ ['outgoing4-timeouts', None, 'incremental'],
+ ['outgoing6-timeouts', None, 'incremental']
+ ]
+ },
+ 'drops': {
+ 'options': [None, 'PowerDNS Recursor Drops', 'count', 'performance', 'powerdns_recursor.drops', 'line'],
+ 'lines': [
+ ['over-capacity-drops', None, 'incremental']
+ ]
+ },
+ 'cache_usage': {
+ 'options': [None, 'PowerDNS Recursor Cache Usage', 'count', 'cache', 'powerdns_recursor.cache_usage', 'line'],
+ 'lines': [
+ ['cache-hits', None, 'incremental'],
+ ['cache-misses', None, 'incremental'],
+ ['packetcache-hits', 'packet-cache-hit', 'incremental'],
+ ['packetcache-misses', 'packet-cache-miss', 'incremental']
+ ]
+ },
+ 'cache_size': {
+ 'options': [None, 'PowerDNS Recursor Cache Size', 'count', 'cache', 'powerdns_recursor.cache_size', 'line'],
+ 'lines': [
+ ['cache-entries', None, 'absolute'],
+ ['packetcache-entries', None, 'absolute'],
+ ['negcache-entries', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def check(self):
+ self._manager = self._build_manager()
+ if not self._manager:
+ return None
+
+ d = self._get_data()
+ if not d:
+ return False
+
+ if is_recursor(d):
+ self.order = RECURSOR_ORDER
+ self.definitions = RECURSOR_CHARTS
+ self.module_name = 'powerdns_recursor'
+
+ return True
+
+ def _get_data(self):
+ data = self._get_raw_data()
+ if not data:
+ return None
+ return dict((d['name'], d['value']) for d in loads(data))
+
+
+def is_recursor(d):
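+ # Heuristic: these two metrics are exported by the Recursor only, so their
+ # presence distinguishes it from the authoritative server.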
+ return 'over-capacity-drops' in d and 'tcp-questions' in d
diff --git a/collectors/python.d.plugin/powerdns/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf
new file mode 100644
index 0000000..559bf17
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/powerdns.conf
@@ -0,0 +1,76 @@
+# netdata python.d.plugin configuration for powerdns
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them is allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, powerdns also supports the following:
+#
+# url: 'URL' # the URL to fetch powerdns performance statistics
+# header:
+# X-API-Key: 'Key' # API key
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+# localhost:
+# name : 'local'
+# url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+# header:
+# X-API-Key: 'change_me'
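+
+# An illustrative PowerDNS Recursor job (the Recursor's built-in webserver
+# listens on port 8082 by default; the module detects Recursor statistics
+# automatically and switches to the Recursor charts):
+#
+# recursor:
+#  name : 'recursor'
+#  url : 'http://127.0.0.1:8082/api/v1/servers/localhost/statistics'
+#  header:
+#   X-API-Key: 'change_me'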
diff --git a/collectors/python.d.plugin/proxysql/Makefile.inc b/collectors/python.d.plugin/proxysql/Makefile.inc
new file mode 100644
index 0000000..66be372
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += proxysql/proxysql.chart.py
+dist_pythonconfig_DATA += proxysql/proxysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += proxysql/README.md proxysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
new file mode 100644
index 0000000..6e5a212
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -0,0 +1,64 @@
+# proxysql
+
+This module monitors ProxySQL backend and frontend performance metrics.
+
+It produces:
+
+1. **Connections (frontend)**
+ * connected: number of frontend connections currently connected
+ * aborted: number of frontend connections aborted due to invalid credentials or max_connections being reached
+ * non_idle: number of frontend connections that are not currently idle
+ * created: number of frontend connections created
+2. **Questions (frontend)**
+ * questions: total number of queries sent from frontends
+ * slow_queries: number of queries that ran for longer than the threshold in milliseconds defined in global variable `mysql-long_query_time`
+3. **Overall Bandwidth (backends)**
+ * in
+ * out
+4. **Status (backends)**
+ * Backends
+ * `1=ONLINE`: backend server is fully operational
+ * `2=SHUNNED`: backend server is temporarily taken out of use, either because of too many connection errors in a short time or because replication lag exceeded the allowed threshold
+ * `3=OFFLINE_SOFT`: when a server is put into OFFLINE_SOFT mode, new incoming connections aren't accepted anymore, while the existing connections are kept until they become inactive. In other words, connections are kept in use until the current transaction is completed. This allows a backend to be gracefully detached
+ * `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped and new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
+ * `-1`: Unknown status
+5. **Bandwidth (backends)**
+ * Backends
+ * in
+ * out
+6. **Queries (backends)**
+ * Backends
+ * queries
+7. **Latency (backends)**
+ * Backends
+ * ping time
+8. **Pool connections (backends)**
+ * Backends
+ * Used: the number of connections currently used by ProxySQL to send queries to the backend server.
+ * Free: the number of connections currently free.
+ * Established/OK: the number of connections that were established successfully.
+ * Error: the number of connections that failed to be established.
+9. **Commands**
+ * Commands
+ * Count
+ * Duration (Total duration for each command)
+10. **Commands Histogram**
+ * Commands
+ * 100us, 500us, ..., 10s, inf: the total number of commands of the given type that completed within the given time limit (and above the previous one).
+
+### configuration
+
+```yaml
+tcpipv4:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '127.0.0.1'
+ port : '6032'
+```
+
+If no configuration is given, the module will fail to run.
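+
+For reference, the same statistics can be queried directly from ProxySQL's
+admin interface, which speaks the MySQL protocol. A minimal sketch (Python 3
+with `pymysql` installed, credentials from the example above; the variable
+names printed are assumptions based on the columns the module queries):
+
+```python
+import pymysql
+
+# ProxySQL's admin/stats interface listens on port 6032 by default.
+conn = pymysql.connect(host='127.0.0.1', port=6032, user='stats', password='stats')
+cur = conn.cursor()
+cur.execute('SELECT Variable_Name, Variable_Value FROM stats_mysql_global')
+stats = dict(cur.fetchall())
+print(stats.get('Questions'), stats.get('Slow_queries'))
+cur.close()
+conn.close()
+```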
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fproxysql%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
new file mode 100644
index 0000000..c971474
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# Description: Proxysql netdata python.d module
+# Author: Ali Borhani (alibo)
+# SPDX-License-Identifier: GPL-3.0+
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+
+def query(table, *params):
+ return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
+
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_global
+QUERY_GLOBAL = query(
+ "stats_mysql_global",
+ "Variable_Name",
+ "Variable_Value"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_connection_pool
+QUERY_CONNECTION_POOL = query(
+ "stats_mysql_connection_pool",
+ "hostgroup",
+ "srv_host",
+ "srv_port",
+ "status",
+ "ConnUsed",
+ "ConnFree",
+ "ConnOK",
+ "ConnERR",
+ "Queries",
+ "Bytes_data_sent",
+ "Bytes_data_recv",
+ "Latency_us"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_commands_counters
+QUERY_COMMANDS = query(
+ "stats_mysql_commands_counters",
+ "Command",
+ "Total_Time_us",
+ "Total_cnt",
+ "cnt_100us",
+ "cnt_500us",
+ "cnt_1ms",
+ "cnt_5ms",
+ "cnt_10ms",
+ "cnt_50ms",
+ "cnt_100ms",
+ "cnt_500ms",
+ "cnt_1s",
+ "cnt_5s",
+ "cnt_10s",
+ "cnt_INFs"
+)
+
+GLOBAL_STATS = [
+ 'active_transactions',  # needed by the active_transactions chart
+ 'client_connections_aborted',
+ 'client_connections_connected',
+ 'client_connections_created',
+ 'client_connections_non_idle',
+ 'proxysql_uptime',
+ 'questions',
+ 'slow_queries'
+]
+
+CONNECTION_POOL_STATS = [
+ 'status',
+ 'connused',
+ 'connfree',
+ 'connok',
+ 'connerr',
+ 'queries',
+ 'bytes_data_sent',
+ 'bytes_data_recv',
+ 'latency_us'
+]
+
+ORDER = [
+ 'connections',
+ 'active_transactions',
+ 'questions',
+ 'pool_overall_net',
+ 'commands_count',
+ 'commands_duration',
+ 'pool_status',
+ 'pool_net',
+ 'pool_queries',
+ 'pool_latency',
+ 'pool_connection_used',
+ 'pool_connection_free',
+ 'pool_connection_ok',
+ 'pool_connection_error'
+]
+
+HISTOGRAM_ORDER = [
+ '100us',
+ '500us',
+ '1ms',
+ '5ms',
+ '10ms',
+ '50ms',
+ '100ms',
+ '500ms',
+ '1s',
+ '5s',
+ '10s',
+ 'inf'
+]
+
+STATUS = {
+ "ONLINE": 1,
+ "SHUNNED": 2,
+ "OFFLINE_SOFT": 3,
+ "OFFLINE_HARD": 4
+}
+
+CHARTS = {
+ 'pool_status': {
+ 'options': [None, 'ProxySQL Backend Status', 'status', 'status', 'proxysql.pool_status', 'line'],
+ 'lines': []
+ },
+ 'pool_net': {
+ 'options': [None, 'ProxySQL Backend Bandwidth', 'kilobits/s', 'bandwidth', 'proxysql.pool_net', 'area'],
+ 'lines': []
+ },
+ 'pool_overall_net': {
+ 'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
+ 'proxysql.pool_overall_net', 'area'],
+ 'lines': [
+ ['bytes_data_recv', 'in', 'incremental', 8, 1000],
+ ['bytes_data_sent', 'out', 'incremental', -8, 1000]
+ ]
+ },
+ 'questions': {
+ 'options': [None, 'ProxySQL Frontend Questions', 'questions/s', 'questions', 'proxysql.questions', 'line'],
+ 'lines': [
+ ['questions', 'questions', 'incremental'],
+ ['slow_queries', 'slow_queries', 'incremental']
+ ]
+ },
+ 'pool_queries': {
+ 'options': [None, 'ProxySQL Backend Queries', 'queries/s', 'queries', 'proxysql.queries', 'line'],
+ 'lines': []
+ },
+ 'active_transactions': {
+ 'options': [None, 'ProxySQL Frontend Active Transactions', 'transactions/s', 'active_transactions',
+ 'proxysql.active_transactions', 'line'],
+ 'lines': [
+ ['active_transactions', 'active_transactions', 'absolute']
+ ]
+ },
+ 'pool_latency': {
+ 'options': [None, 'ProxySQL Backend Latency', 'milliseconds', 'latency', 'proxysql.latency', 'line'],
+ 'lines': []
+ },
+ 'connections': {
+ 'options': [None, 'ProxySQL Frontend Connections', 'connections/s', 'connections', 'proxysql.connections',
+ 'line'],
+ 'lines': [
+ ['client_connections_connected', 'connected', 'absolute'],
+ ['client_connections_created', 'created', 'incremental'],
+ ['client_connections_aborted', 'aborted', 'incremental'],
+ ['client_connections_non_idle', 'non_idle', 'absolute']
+ ]
+ },
+ 'pool_connection_used': {
+ 'options': [None, 'ProxySQL Used Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_used_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_free': {
+ 'options': [None, 'ProxySQL Free Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_free_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_ok': {
+ 'options': [None, 'ProxySQL Established Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_ok_connections', 'line'],
+ 'lines': []
+ },
+ 'pool_connection_error': {
+ 'options': [None, 'ProxySQL Error Connections', 'connections', 'pool_connections',
+ 'proxysql.pool_error_connections', 'line'],
+ 'lines': []
+ },
+ 'commands_count': {
+ 'options': [None, 'ProxySQL Commands', 'commands', 'commands', 'proxysql.commands_count', 'line'],
+ 'lines': []
+ },
+ 'commands_duration': {
+ 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration', 'line'],
+ 'lines': []
+ }
+}
+
+
+class Service(MySQLService):
+ def __init__(self, configuration=None, name=None):
+ MySQLService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.queries = dict(
+ global_status=QUERY_GLOBAL,
+ connection_pool_status=QUERY_CONNECTION_POOL,
+ commands_status=QUERY_COMMANDS
+ )
+
+ def _get_data(self):
+ raw_data = self._get_raw_data(description=True)
+
+ if not raw_data:
+ return None
+
+ to_netdata = dict()
+
+ if 'global_status' in raw_data:
+ global_status = dict(raw_data['global_status'][0])
+ for key in global_status:
+ if key.lower() in GLOBAL_STATS:
+ to_netdata[key.lower()] = global_status[key]
+
+ if 'connection_pool_status' in raw_data:
+
+ to_netdata['bytes_data_recv'] = 0
+ to_netdata['bytes_data_sent'] = 0
+
+ for record in raw_data['connection_pool_status'][0]:
+ backend = self.generate_backend(record)
+ name = self.generate_backend_name(backend)
+
+ for key in backend:
+ if key in CONNECTION_POOL_STATS:
+ if key == 'status':
+ backend[key] = self.convert_status(backend[key])
+
+ if len(self.charts) > 0:
+ if (name + '_status') not in self.charts['pool_status']:
+ self.add_backend_dimensions(name)
+
+ to_netdata["{0}_{1}".format(name, key)] = backend[key]
+
+ if key == 'bytes_data_recv':
+ to_netdata['bytes_data_recv'] += int(backend[key])
+
+ if key == 'bytes_data_sent':
+ to_netdata['bytes_data_sent'] += int(backend[key])
+
+ if 'commands_status' in raw_data:
+ for record in raw_data['commands_status'][0]:
+ cmd = self.generate_command_stats(record)
+ name = cmd['name']
+
+ if len(self.charts) > 0:
+ if (name + '_count') not in self.charts['commands_count']:
+ self.add_command_dimensions(name)
+ self.add_histogram_chart(cmd)
+
+ to_netdata[name + '_count'] = cmd['count']
+ to_netdata[name + '_duration'] = cmd['duration']
+ for histogram in cmd['histogram']:
+ dimId = 'commands_histogram_{0}_{1}'.format(name, histogram)
+ to_netdata[dimId] = cmd['histogram'][histogram]
+
+ return to_netdata or None
+
+ def add_backend_dimensions(self, name):
+ self.charts['pool_status'].add_dimension([name + '_status', name, 'absolute'])
+ self.charts['pool_net'].add_dimension([name + '_bytes_data_recv', 'from_' + name, 'incremental', 8, 1024])
+ self.charts['pool_net'].add_dimension([name + '_bytes_data_sent', 'to_' + name, 'incremental', -8, 1024])
+ self.charts['pool_queries'].add_dimension([name + '_queries', name, 'incremental'])
+ self.charts['pool_latency'].add_dimension([name + '_latency_us', name, 'absolute', 1, 1000])
+ self.charts['pool_connection_used'].add_dimension([name + '_connused', name, 'absolute'])
+ self.charts['pool_connection_free'].add_dimension([name + '_connfree', name, 'absolute'])
+ self.charts['pool_connection_ok'].add_dimension([name + '_connok', name, 'incremental'])
+ self.charts['pool_connection_error'].add_dimension([name + '_connerr', name, 'incremental'])
+
+ def add_command_dimensions(self, cmd):
+ self.charts['commands_count'].add_dimension([cmd + '_count', cmd, 'incremental'])
+ self.charts['commands_duration'].add_dimension([cmd + '_duration', cmd, 'incremental', 1, 1000])
+
+ def add_histogram_chart(self, cmd):
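+ # Histogram charts are created lazily: each command gets its own stacked
+ # chart the first time it appears in stats_mysql_commands_counters.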
+ chart = self.charts.add_chart(self.histogram_chart(cmd))
+
+ for histogram in HISTOGRAM_ORDER:
+ dimId = 'commands_histogram_{0}_{1}'.format(cmd['name'], histogram)
+ chart.add_dimension([dimId, histogram, 'incremental'])
+
+ @staticmethod
+ def histogram_chart(cmd):
+ return [
+ 'commands_histogram_' + cmd['name'],
+ None,
+ 'ProxySQL {0} Command Histogram'.format(cmd['name'].title()),
+ 'commands',
+ 'commands_histogram',
+ 'proxysql.commands_histogram_' + cmd['name'],
+ 'stacked'
+ ]
+
+ @staticmethod
+ def generate_backend(data):
+ return {
+ 'hostgroup': data[0],
+ 'srv_host': data[1],
+ 'srv_port': data[2],
+ 'status': data[3],
+ 'connused': data[4],
+ 'connfree': data[5],
+ 'connok': data[6],
+ 'connerr': data[7],
+ 'queries': data[8],
+ 'bytes_data_sent': data[9],
+ 'bytes_data_recv': data[10],
+ 'latency_us': data[11]
+ }
+
+ @staticmethod
+ def generate_command_stats(data):
+ return {
+ 'name': data[0].lower(),
+ 'duration': data[1],
+ 'count': data[2],
+ 'histogram': {
+ '100us': data[3],
+ '500us': data[4],
+ '1ms': data[5],
+ '5ms': data[6],
+ '10ms': data[7],
+ '50ms': data[8],
+ '100ms': data[9],
+ '500ms': data[10],
+ '1s': data[11],
+ '5s': data[12],
+ '10s': data[13],
+ 'inf': data[14]
+ }
+ }
+
+ @staticmethod
+ def generate_backend_name(backend):
+ hostgroup = backend['hostgroup'].replace(' ', '_').lower()
+ host = backend['srv_host'].replace('.', '_')
+
+ return "{0}_{1}_{2}".format(hostgroup, host, backend['srv_port'])
+
+ @staticmethod
+ def convert_status(status):
+ if status in STATUS:
+ return STATUS[status]
+ return -1
diff --git a/collectors/python.d.plugin/proxysql/proxysql.conf b/collectors/python.d.plugin/proxysql/proxysql.conf
new file mode 100644
index 0000000..3c503a8
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.conf
@@ -0,0 +1,116 @@
+# netdata python.d.plugin configuration for ProxySQL
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them is allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, proxysql also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the proxysql username to use
+# pass: 'password' # the proxysql password to use
+#
+
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : 'localhost'
+ port : '6032'
+
+tcpipv4:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '127.0.0.1'
+ port : '6032'
+
+tcpipv6:
+ name : 'local'
+ user : 'stats'
+ pass : 'stats'
+ host : '::1'
+ port : '6032'
+
+tcp_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : 'localhost'
+ port : '6032'
+
+tcpipv4_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : '127.0.0.1'
+ port : '6032'
+
+tcpipv6_admin:
+ name : 'local'
+ user : 'admin'
+ pass : 'admin'
+ host : '::1'
+ port : '6032'
diff --git a/collectors/python.d.plugin/puppet/Makefile.inc b/collectors/python.d.plugin/puppet/Makefile.inc
new file mode 100644
index 0000000..fe94b92
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += puppet/puppet.chart.py
+dist_pythonconfig_DATA += puppet/puppet.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += puppet/README.md puppet/Makefile.inc
+
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
new file mode 100644
index 0000000..b97eb70
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -0,0 +1,47 @@
+# puppet
+
+Monitors the status of Puppet Server and PuppetDB.
+
+The following charts are drawn:
+
+1. **JVM Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+2. **JVM Non-Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+3. **CPU Usage**
+ * execution
+ * GC (taken by garbage collection)
+4. **File Descriptors**
+ * max
+ * used
+
+
+### configuration
+
+```yaml
+puppetdb:
+ url: 'https://fqdn.example.com:8081'
+ tls_cert_file: /path/to/client.crt
+ tls_key_file: /path/to/client.key
+ autodetection_retry: 1
+
+puppetserver:
+ url: 'https://fqdn.example.com:8140'
+ autodetection_retry: 1
+```
+
+When no configuration is given, the module uses `https://<node FQDN>:8140`, determining the node's FQDN automatically.
+
+### notes
+
+* The exact fully qualified domain name of the node should be used.
+* Puppet Server/DB startup time is usually VERY long, so a reasonably
+ high `autodetection_retry` count should be used.
+* A secure PuppetDB configuration may require a client certificate. This
+ does not apply to the default PuppetDB configuration though.
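+
+For reference, the module reads the debug-level status endpoint. A minimal
+sketch of the same request (Python 3; URL from the example above, with
+certificate verification disabled purely for illustration):
+
+```python
+import json
+import ssl
+from urllib.request import urlopen
+
+url = 'https://fqdn.example.com:8140/status/v1/services?level=debug'
+ctx = ssl._create_unverified_context()  # illustration only; verify certificates in production
+doc = json.loads(urlopen(url, context=ctx).read().decode('utf-8'))
+
+# Depending on the version, jvm-metrics may sit under an 'experimental' key;
+# the module falls back between the two locations.
+jvm = doc['status-service']['status']['experimental']['jvm-metrics']
+print(jvm['heap-memory']['used'], jvm['file-descriptors']['used'])
+```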
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fpuppet%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
new file mode 100644
index 0000000..30e219d
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+# Description: puppet netdata python.d module
+# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This module should work both with OpenSource and PE versions
+# of PuppetServer and PuppetDB.
+#
+# NOTE: PuppetDB may be configured to require a proper TLS
+# client certificate for security reasons. In that case, use the
+# tls_key_file and tls_cert_file options.
+#
+
+import socket
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+update_every = 5
+
+
+MiB = 1 << 20
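+# CPU usage arrives from the status API as a float; it is stored multiplied by
+# CPU_SCALE and divided back in the chart definitions to keep the precision.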
+CPU_SCALE = 1000
+
+ORDER = [
+ 'jvm_heap',
+ 'jvm_nonheap',
+ 'cpu',
+ 'fd_open',
+]
+
+CHARTS = {
+ 'jvm_heap': {
+ 'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_heap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_heap_used', 'used', 'absolute', 1, MiB],
+ ],
+ 'variables': [
+ ['jvm_heap_max'],
+ ['jvm_heap_init'],
+ ],
+ },
+ 'jvm_nonheap': {
+ 'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_nonheap_used', 'used', 'absolute', 1, MiB],
+ ],
+ 'variables': [
+ ['jvm_nonheap_max'],
+ ['jvm_nonheap_init'],
+ ],
+ },
+ 'cpu': {
+ 'options': [None, 'CPU usage', 'percentage', 'resources', 'puppet.cpu', 'stacked'],
+ 'lines': [
+ ['cpu_time', 'execution', 'absolute', 1, CPU_SCALE],
+ ['gc_time', 'GC', 'absolute', 1, CPU_SCALE],
+ ]
+ },
+ 'fd_open': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'resources', 'puppet.fdopen', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute'],
+ ],
+ 'variables': [
+ ['fd_max'],
+ ],
+ },
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = 'https://{0}:8140'.format(socket.getfqdn())
+
+ def _get_data(self):
+ # NOTE: there are several ways to retrieve data
+ # 1. Only PE versions:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
+ # 2. Individual Metrics API (JMX):
+ # https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
+ # 3. Extended status at debug level:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
+ #
+ # For the sake of simplicity and efficiency, the status endpoint is used.
+
+ raw_data = self._get_raw_data(self.url + '/status/v1/services?level=debug')
+
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+ data = {}
+
+ try:
+ try:
+ jvm_metrics = raw_data['status-service']['status']['experimental']['jvm-metrics']
+ except KeyError:
+ jvm_metrics = raw_data['status-service']['status']['jvm-metrics']
+
+ heap_mem = jvm_metrics['heap-memory']
+ non_heap_mem = jvm_metrics['non-heap-memory']
+
+ for k in ['max', 'committed', 'used', 'init']:
+ data['jvm_heap_'+k] = heap_mem[k]
+ data['jvm_nonheap_'+k] = non_heap_mem[k]
+
+ fd_open = jvm_metrics['file-descriptors']
+ data['fd_max'] = fd_open['max']
+ data['fd_used'] = fd_open['used']
+
+ data['cpu_time'] = int(jvm_metrics['cpu-usage'] * CPU_SCALE)
+ data['gc_time'] = int(jvm_metrics['gc-cpu-usage'] * CPU_SCALE)
+ except KeyError:
+ pass
+
+ return data or None
diff --git a/collectors/python.d.plugin/puppet/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf
new file mode 100644
index 0000000..ff5c3d0
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.conf
@@ -0,0 +1,94 @@
+# netdata python.d.plugin configuration for Puppet Server and Puppet DB
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them is allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# These configuration options come from the UrlService base:
+# url: # HTTP or HTTPS URL
+# tls_verify: False # Control HTTPS server certificate verification
+# tls_ca_file: # Optional CA (bundle) file to use
+# tls_cert_file: # Optional client certificate file
+# tls_key_file: # Optional client key file
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+# puppet:
+# url: 'https://<FQDN>:8140'
+#
+
+#
+# A production configuration should look like the example below.
+#
+# NOTE: Puppet Server/DB startup time is usually VERY long, so a
+# reasonably high autodetection_retry count should be used.
+#
+# NOTE: a secure PuppetDB config may require a client certificate. This
+# does not apply to the default PuppetDB configuration though.
+#
+# puppetdb:
+# url: 'https://fqdn.example.com:8081'
+# tls_cert_file: /path/to/client.crt
+# tls_key_file: /path/to/client.key
+# autodetection_retry: 1
+#
+# puppetserver:
+# url: 'https://fqdn.example.com:8140'
+# autodetection_retry: 1
+#
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
new file mode 100644
index 0000000..7223620
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.conf
@@ -0,0 +1,106 @@
+# netdata python.d.plugin configuration
+#
+# This file is in YAML format.
+# Generally the format is:
+#
+# name: value
+#
+
+# Enable / disable the whole python.d.plugin (all its modules)
+enabled: yes
+
+# ----------------------------------------------------------------------
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+
+# Enable / Disable explicit garbage collection (full collection run). Default is enabled.
+gc_run: yes
+
+# Garbage collection interval in seconds. Default is 300.
+gc_interval: 300
+
+# adaptec_raid: yes
+# apache: yes
+
+# apache_cache has been replaced by web_log
+apache_cache: no
+# beanstalk: yes
+# bind_rndc: yes
+# boinc: yes
+# ceph: yes
+chrony: no
+# couchdb: yes
+# cpufreq: yes
+# cpuidle: yes
+# dns_query_time: yes
+# dnsdist: yes
+# dockerd: yes
+# dovecot: yes
+# elasticsearch: yes
+
+# this is just an example
+example: no
+
+# exim: yes
+# fail2ban: yes
+# freeradius: yes
+go_expvar: no
+
+# gunicorn_log has been replaced by web_log
+gunicorn_log: no
+# haproxy: yes
+# hddtemp: yes
+# httpcheck: yes
+# icecast: yes
+# ipfs: yes
+# isc_dhcpd: yes
+# linux_power_supply: yes
+# litespeed: yes
+logind: no
+# mdstat: yes
+# megacli: yes
+# memcached: yes
+# mongodb: yes
+# monit: yes
+# mysql: yes
+# nginx: yes
+# nginx_plus: yes
+# nvidia_smi: yes
+
+# nginx_log has been replaced by web_log
+nginx_log: no
+# nsd: yes
+# ntpd: yes
+# openldap: yes
+# ovpn_status_log: yes
+# phpfpm: yes
+# portcheck: yes
+# postfix: yes
+# postgres: yes
+# powerdns: yes
+# proxysql: yes
+# puppet: yes
+# rabbitmq: yes
+# redis: yes
+# rethinkdbs: yes
+# retroshare: yes
+# samba: yes
+# sensors: yes
+# smartd_log: yes
+# spigotmc: yes
+# springboot: yes
+# squid: yes
+# traefik: yes
+# tomcat: yes
+# tor: yes
+unbound: no
+# uwsgi: yes
+# varnish: yes
+# w1sensor: yes
+# web_log: yes
\ No newline at end of file
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
new file mode 100644
index 0000000..6521fed
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -0,0 +1,427 @@
+#!/usr/bin/env bash
+'''':; exec "$(command -v python || command -v python3 || command -v python2 ||
+echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
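+# The two lines above form a bash/python polyglot: when run by bash (via the
+# shebang), '''': reduces to the no-op ':' and exec re-runs this file with the
+# first python interpreter found; python itself sees the whole construct as a
+# harmless string literal.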
+
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import gc
+import os
+import sys
+import threading
+
+from re import sub
+from sys import version_info, argv
+from time import sleep
+
+GC_RUN = True
+GC_COLLECT_EVERY = 300
+
+PY_VERSION = version_info[:2]
+
+USER_CONFIG_DIR = os.getenv('NETDATA_USER_CONFIG_DIR', '@configdir_POST@')
+STOCK_CONFIG_DIR = os.getenv('NETDATA_STOCK_CONFIG_DIR', '@libconfigdir_POST@')
+
+PLUGINS_USER_CONFIG_DIR = os.path.join(USER_CONFIG_DIR, 'python.d')
+PLUGINS_STOCK_CONFIG_DIR = os.path.join(STOCK_CONFIG_DIR, 'python.d')
+
+
+PLUGINS_DIR = os.path.abspath(os.getenv(
+ 'NETDATA_PLUGINS_DIR',
+ os.path.dirname(__file__)) + '/../python.d')
+
+
+PYTHON_MODULES_DIR = os.path.join(PLUGINS_DIR, 'python_modules')
+
+sys.path.append(PYTHON_MODULES_DIR)
+
+from bases.loaders import ModuleAndConfigLoader # noqa: E402
+from bases.loggers import PythonDLogger # noqa: E402
+from bases.collection import setdefault_values, run_and_exit # noqa: E402
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+BASE_CONFIG = {'update_every': os.getenv('NETDATA_UPDATE_EVERY', 1),
+ 'priority': 60000,
+ 'autodetection_retry': 0,
+ 'chart_cleanup': 10,
+ 'penalty': True,
+ 'name': str()}
+
+
+MODULE_EXTENSION = '.chart.py'
+OBSOLETE_MODULES = ['apache_cache', 'gunicorn_log', 'nginx_log', 'cpufreq', 'cpuidle', 'mdstat', 'linux_power_supply']
+
+
+def module_ok(m):
+ return m.endswith(MODULE_EXTENSION) and m[:-len(MODULE_EXTENSION)] not in OBSOLETE_MODULES
+
+
+ALL_MODULES = [m for m in sorted(os.listdir(PLUGINS_DIR)) if module_ok(m)]
+
+
+def parse_cmd():
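+ # Positional CLI arguments: 'debug' and 'trace' toggle logging behaviour,
+ # a bare integer (> 1) overrides update_every, and anything matching an
+ # existing module restricts the run to those modules.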
+ debug = 'debug' in argv[1:]
+ trace = 'trace' in argv[1:]
+ override_update_every = next((arg for arg in argv[1:] if arg.isdigit() and int(arg) > 1), False)
+ modules = [''.join([m, MODULE_EXTENSION]) for m in argv[1:] if ''.join([m, MODULE_EXTENSION]) in ALL_MODULES]
+ return debug, trace, override_update_every, modules or ALL_MODULES
+
+
+def multi_job_check(config):
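+ # A config is treated as multi-job when any top-level value is itself a
+ # mapping, i.e. the file defines named JOB sections.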
+ return next((True for key in config if isinstance(config[key], dict)), False)
+
+
+class RawModule:
+ def __init__(self, name, path, explicitly_enabled=True):
+ self.name = name
+ self.path = path
+ self.explicitly_enabled = explicitly_enabled
+
+
+class Job(object):
+ def __init__(self, initialized_job, job_id):
+ """
+ :param initialized_job: instance of <Class Service>
+ :param job_id: <str>
+ """
+ self.job = initialized_job
+ self.id = job_id # key in Modules.jobs()
+ self.module_name = self.job.__module__ # used in Plugin.delete_job()
+ self.recheck_every = self.job.configuration.pop('autodetection_retry')
+ self.checked = False # used in Plugin.check_job()
+ self.created = False # used in Plugin.create_job_charts()
+ if self.job.update_every < int(OVERRIDE_UPDATE_EVERY):
+ self.job.update_every = int(OVERRIDE_UPDATE_EVERY)
+
+ def __getattr__(self, item):
+ return getattr(self.job, item)
+
+ def __repr__(self):
+ return self.job.__repr__()
+
+ def is_dead(self):
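+ # Jobs run as threads: 'ident' is set once the thread has started, so a
+ # set ident combined with not is_alive() means the job has exited.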
+ return bool(self.ident) and not self.is_alive()
+
+ def not_launched(self):
+ return not bool(self.ident)
+
+ def is_autodetect(self):
+ return self.recheck_every
+
+
+class Module(object):
+ def __init__(self, service, config):
+ """
+ :param service: <Module>
+ :param config: <dict>
+ """
+ self.service = service
+ self.name = service.__name__
+ self.config = self.jobs_configurations_builder(config)
+ self.jobs = OrderedDict()
+ self.counter = 1
+
+ self.initialize_jobs()
+
+ def __repr__(self):
+ return "<Class Module '{name}'>".format(name=self.name)
+
+ def __iter__(self):
+ return iter(OrderedDict(self.jobs).values())
+
+ def __getitem__(self, item):
+ return self.jobs[item]
+
+ def __delitem__(self, key):
+ del self.jobs[key]
+
+ def __len__(self):
+ return len(self.jobs)
+
+ def __bool__(self):
+ return bool(self.jobs)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def jobs_configurations_builder(self, config):
+ """
+ :param config: <dict>
+ :return:
+ """
+ counter = 0
+ job_base_config = dict()
+
+ for attr in BASE_CONFIG:
+ job_base_config[attr] = config.pop(attr, getattr(self.service, attr, BASE_CONFIG[attr]))
+
+ if not config:
+ config = {str(): dict()}
+ elif not multi_job_check(config):
+ config = {str(): config}
+
+ for job_name in config:
+ if not isinstance(config[job_name], dict):
+ continue
+
+ job_config = setdefault_values(config[job_name], base_dict=job_base_config)
+ job_name = sub(r'\s+', '_', job_name)
+ config[job_name]['name'] = sub(r'\s+', '_', config[job_name]['name'])
+ counter += 1
+ job_id = 'job' + str(counter).zfill(3)
+
+ yield job_id, job_name, job_config
+
+ def initialize_jobs(self):
+ """
+ :return:
+ """
+ for job_id, job_name, job_config in self.config:
+ job_config['job_name'] = job_name
+ job_config['override_name'] = job_config.pop('name')
+
+ try:
+ initialized_job = self.service.Service(configuration=job_config)
+ except Exception as error:
+ Logger.error("job initialization: '{module_name} {job_name}' "
+ "=> ['FAILED'] ({error})".format(module_name=self.name,
+ job_name=job_name,
+ error=error))
+ continue
+ else:
+ Logger.debug("job initialization: '{module_name} {job_name}' "
+ "=> ['OK']".format(module_name=self.name,
+ job_name=job_name or self.name))
+ self.jobs[job_id] = Job(initialized_job=initialized_job,
+ job_id=job_id)
+ del self.config
+ del self.service
+
+
+class Plugin(object):
+ def __init__(self):
+ self.loader = ModuleAndConfigLoader()
+ self.modules = OrderedDict()
+ self.sleep_time = 1
+ self.runs_counter = 0
+
+ user_config = os.path.join(USER_CONFIG_DIR, 'python.d.conf')
+ stock_config = os.path.join(STOCK_CONFIG_DIR, 'python.d.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ self.config, error = self.loader.load_config_from_file(user_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ self.config, error = self.loader.load_config_from_file(stock_config)
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ self.do_gc = self.config.get("gc_run", GC_RUN)
+ self.gc_interval = self.config.get("gc_interval", GC_COLLECT_EVERY)
+
+ if not self.config.get('enabled', True):
+ run_and_exit(Logger.info)('DISABLED in configuration file.')
+
+ self.load_and_initialize_modules()
+ if not self.modules:
+ run_and_exit(Logger.info)('No modules to run. Exit...')
+
+ def __iter__(self):
+ return iter(OrderedDict(self.modules).values())
+
+ @property
+ def jobs(self):
+ return (job for mod in self for job in mod)
+
+ @property
+ def dead_jobs(self):
+ return (job for job in self.jobs if job.is_dead())
+
+ @property
+ def autodetect_jobs(self):
+ return [job for job in self.jobs if job.not_launched()]
+
+ def enabled_modules(self):
+ for mod in MODULES_TO_RUN:
+ mod_name = mod[:-len(MODULE_EXTENSION)]
+ mod_path = os.path.join(PLUGINS_DIR, mod)
+ if any(
+ [
+ self.config.get('default_run', True) and self.config.get(mod_name, True),
+ (not self.config.get('default_run')) and self.config.get(mod_name),
+ ]
+ ):
+ yield RawModule(
+ name=mod_name,
+ path=mod_path,
+ explicitly_enabled=self.config.get(mod_name),
+ )
+
+ def load_and_initialize_modules(self):
+ for mod in self.enabled_modules():
+
+ # Load module from file ------------------------------------------------------------
+ loaded_module, error = self.loader.load_module_from_file(mod.name, mod.path)
+ log = Logger.error if error else Logger.debug
+ log("module load source: '{module_name}' => [{status}]".format(status='FAILED' if error else 'OK',
+ module_name=mod.name))
+ if error:
+ Logger.error("load source error : {0}".format(error))
+ continue
+
+ # Load module config from file ------------------------------------------------------
+ user_config = os.path.join(PLUGINS_USER_CONFIG_DIR, mod.name + '.conf')
+ stock_config = os.path.join(PLUGINS_STOCK_CONFIG_DIR, mod.name + '.conf')
+
+ Logger.debug("loading '{0}'".format(user_config))
+ loaded_config, error = self.loader.load_config_from_file(user_config)
+ if error:
+ Logger.error("cannot load '{0}' : {1}. Will try stock version.".format(user_config, error))
+ Logger.debug("loading '{0}'".format(stock_config))
+ loaded_config, error = self.loader.load_config_from_file(stock_config)
+
+ if error:
+ Logger.error("cannot load '{0}': {1}".format(stock_config, error))
+
+ # Skip disabled modules
+ if getattr(loaded_module, 'disabled_by_default', False) and not mod.explicitly_enabled:
+ Logger.info("module '{0}' disabled by default".format(loaded_module.__name__))
+ continue
+
+ # Module initialization ---------------------------------------------------
+
+ initialized_module = Module(service=loaded_module, config=loaded_config)
+ Logger.debug("module status: '{module_name}' => [{status}] "
+ "(jobs: {jobs_number})".format(status='OK' if initialized_module else 'FAILED',
+ module_name=initialized_module.name,
+ jobs_number=len(initialized_module)))
+ if initialized_module:
+ self.modules[initialized_module.name] = initialized_module
+
+ @staticmethod
+ def check_job(job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ try:
+ check_ok = bool(job.check())
+ except Exception as error:
+ job.error('check() unhandled exception: {error}'.format(error=error))
+ return None
+ else:
+ return check_ok
+
+ @staticmethod
+ def create_job_charts(job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ try:
+ create_ok = job.create()
+ except Exception as error:
+ job.error('create() unhandled exception: {error}'.format(error=error))
+ return False
+ else:
+ return create_ok
+
+ def delete_job(self, job):
+ """
+ :param job: <Job>
+ :return:
+ """
+ del self.modules[job.module_name][job.id]
+
+ def run_check(self):
+ checked = list()
+ for job in self.jobs:
+ if job.name in checked:
+ job.info('check() => [DROPPED] (already served by another job)')
+ self.delete_job(job)
+ continue
+ ok = self.check_job(job)
+ if ok:
+ job.info('check() => [OK]')
+ checked.append(job.name)
+ job.checked = True
+ continue
+ if not job.is_autodetect() or ok is None:
+ job.info('check() => [FAILED]')
+ self.delete_job(job)
+ else:
+ job.info('check() => [RECHECK] (autodetection_retry: {0})'.format(job.recheck_every))
+
+ def run_create(self):
+ for job in self.jobs:
+ if not job.checked:
+ # skip autodetection_retry jobs
+ continue
+ ok = self.create_job_charts(job)
+ if ok:
+ job.debug('create() => [OK] (charts: {0})'.format(len(job.charts)))
+ job.created = True
+ continue
+ job.error('create() => [FAILED] (charts: {0})'.format(len(job.charts)))
+ self.delete_job(job)
+
+ def start(self):
+ self.run_check()
+ self.run_create()
+ for job in self.jobs:
+ if job.created:
+ job.start()
+
+ while True:
+ if threading.active_count() <= 1 and not self.autodetect_jobs:
+ run_and_exit(Logger.info)('FINISHED')
+
+ sleep(self.sleep_time)
+ self.cleanup()
+ self.autodetect_retry()
+
+ # FIXME: https://github.com/netdata/netdata/issues/3817
+ if self.do_gc and self.runs_counter % self.gc_interval == 0:
+ v = gc.collect()
+ Logger.debug("GC full collection run result: {0}".format(v))
+
+ def cleanup(self):
+ for job in self.dead_jobs:
+ self.delete_job(job)
+ for mod in self:
+ if not mod:
+ del self.modules[mod.name]
+
+ def autodetect_retry(self):
+ self.runs_counter += self.sleep_time
+ for job in self.autodetect_jobs:
+ if self.runs_counter % job.recheck_every == 0:
+ checked = self.check_job(job)
+ if checked:
+ created = self.create_job_charts(job)
+ if not created:
+ self.delete_job(job)
+ continue
+ job.start()
+
+
+if __name__ == '__main__':
+ DEBUG, TRACE, OVERRIDE_UPDATE_EVERY, MODULES_TO_RUN = parse_cmd()
+ Logger = PythonDLogger()
+ if DEBUG:
+ Logger.logger.severity = 'DEBUG'
+ if TRACE:
+ Logger.log_traceback = True
+ Logger.info('Using python {version}'.format(version=PY_VERSION[0]))
+
+ plugin = Plugin()
+ plugin.start()
diff --git a/collectors/python.d.plugin/python_modules/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
new file mode 100644
index 0000000..72f9ff7
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from subprocess import Popen, PIPE
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
+
+
+class ExecutableService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.command = None
+
+ def _get_raw_data(self, stderr=False, command=None):
+ """
+ Get raw data from executed command
+ :return: <list>
+ """
+ try:
+ p = Popen(command if command else self.command, stdout=PIPE, stderr=PIPE)
+ except Exception as error:
+ self.error('Executing command {command} resulted in error: {error}'.format(command=command or self.command,
+ error=error))
+ return None
+ data = list()
+ std = p.stderr if stderr else p.stdout
+ for line in std:
+ try:
+ data.append(line.decode('utf-8'))
+ except TypeError:
+ continue
+
+ return data
+
+ def check(self):
+ """
+ Parse basic configuration, check if command is whitelisted and is returning values
+ :return: <boolean>
+ """
+ # Preference: 1. "command" from configuration file 2. "command" from plugin (if specified)
+ if 'command' in self.configuration:
+ self.command = self.configuration['command']
+
+ # "command" must be: 1.not None 2. type <str>
+ if not (self.command and isinstance(self.command, str)):
+ self.error('Command is not defined or command type is not <str>')
+ return False
+
+ # Split "command" into: 1. command <str> 2. options <list>
+ command, opts = self.command.split()[0], self.command.split()[1:]
+
+ # Check for "bad" symbols in options. No pipes, redirects etc.
+ opts_list = ['&', '|', ';', '>', '<']
+ bad_opts = set(''.join(opts)) & set(opts_list)
+ if bad_opts:
+ self.error("Bad command argument(s): {opts}".format(opts=bad_opts))
+ return False
+
+ # Find absolute path ('echo' => '/bin/echo')
+ if '/' not in command:
+ command = find_binary(command)
+ if not command:
+ self.error('Can\'t locate "{command}" binary'.format(command=self.command))
+ return False
+ # Check if binary exist and executable
+ else:
+ if not os.access(command, os.X_OK):
+ self.error('"{binary}" is not executable'.format(binary=command))
+ return False
+
+ self.command = [command] + opts if opts else [command]
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Command: {command}. Error: {error}'.format(command=self.command,
+ error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error('Command "{command}" returned no data'.format(command=self.command))
+ return False
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
new file mode 100644
index 0000000..5acfd73
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from glob import glob
+import os
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class LogService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.log_path = self.configuration.get('path')
+ self.__glob_path = self.log_path
+ self._last_position = 0
+ self.__re_find = dict(current=0, run=0, maximum=60)
+
+ def _get_raw_data(self):
+ """
+ Get log lines since last poll
+ :return: list
+ """
+ lines = list()
+ try:
+ if self.__re_find['current'] == self.__re_find['run']:
+ self._find_recent_log_file()
+ size = os.path.getsize(self.log_path)
+ if size == self._last_position:
+ self.__re_find['current'] += 1
+ return list() # return empty list if nothing has changed
+ elif size < self._last_position:
+ self._last_position = 0 # read from beginning if file has shrunk
+
+ with open(self.log_path) as fp:
+ fp.seek(self._last_position)
+ for line in fp:
+ lines.append(line)
+ self._last_position = fp.tell()
+ self.__re_find['current'] = 0
+ except (OSError, IOError) as error:
+ self.__re_find['current'] += 1
+ self.error(str(error))
+
+ return lines or None
+
+ def _find_recent_log_file(self):
+ """
+ :return:
+ """
+ self.__re_find['run'] = self.__re_find['maximum']
+ self.__re_find['current'] = 0
+ self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files
+ path_list = glob(self.__glob_path)
+ if path_list:
+ self.log_path = max(path_list)
+ return True
+ return False
+
+ def check(self):
+ """
+ Parse basic configuration and check if log file exists
+ :return: boolean
+ """
+ if not self.log_path:
+ self.error('No path to log specified')
+ return None
+
+ if self._find_recent_log_file() and os.access(self.log_path, os.R_OK) and os.path.isfile(self.log_path):
+ return True
+ self.error('Cannot access {0}'.format(self.log_path))
+ return False
+
+ def create(self):
+ # set cursor at last byte of log file
+ self._last_position = os.path.getsize(self.log_path)
+ status = SimpleService.create(self)
+ return status
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
new file mode 100644
index 0000000..9a694aa
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from sys import exc_info
+
+try:
+ import MySQLdb
+
+ PY_MYSQL = True
+except ImportError:
+ try:
+ import pymysql as MySQLdb
+
+ PY_MYSQL = True
+ except ImportError:
+ PY_MYSQL = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class MySQLService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.__connection = None
+ self.__conn_properties = dict()
+ self.extra_conn_properties = dict()
+ self.__queries = self.configuration.get('queries', dict())
+ self.queries = dict()
+
+ def __connect(self):
+ try:
+ connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties)
+ except (MySQLdb.MySQLError, TypeError, AttributeError) as error:
+ return None, str(error)
+ else:
+ return connection, None
+
+ def check(self):
+ def get_connection_properties(conf, extra_conf):
+ properties = dict()
+ if conf.get('user'):
+ properties['user'] = conf['user']
+ if conf.get('pass'):
+ properties['passwd'] = conf['pass']
+ if conf.get('socket'):
+ properties['unix_socket'] = conf['socket']
+ elif conf.get('host'):
+ properties['host'] = conf['host']
+ properties['port'] = int(conf.get('port', 3306))
+ elif conf.get('my.cnf'):
+ if MySQLdb.__name__ == 'pymysql':
+ self.error('"my.cnf" parsing is not working for pymysql')
+ else:
+ properties['read_default_file'] = conf['my.cnf']
+ if isinstance(extra_conf, dict) and extra_conf:
+ properties.update(extra_conf)
+
+ return properties or None
+
+ def is_valid_queries_dict(raw_queries, log_error):
+ """
+ :param raw_queries: dict:
+ :param log_error: function:
+ :return: dict or None
+
+            raw_queries is valid when it is a non-empty <dict> and remains non-empty after invalid queries are removed
+ """
+
+ def is_valid_query(query):
+ return all([isinstance(query, str),
+ query.startswith(('SELECT', 'select', 'SHOW', 'show'))])
+
+ if hasattr(raw_queries, 'keys') and raw_queries:
+ valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)])
+ bad_queries = set(raw_queries) - set(valid_queries)
+
+ if bad_queries:
+ log_error('Removed query(s): {queries}'.format(queries=bad_queries))
+ return valid_queries
+ else:
+ log_error('Unsupported "queries" format. Must be not empty <dict>')
+ return None
+
+ if not PY_MYSQL:
+ self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin')
+ return False
+
+ # Preference: 1. "queries" from the configuration file 2. "queries" from the module
+ self.queries = self.__queries or self.queries
+ # Check if "self.queries" exist, not empty and all queries are in valid format
+ self.queries = is_valid_queries_dict(self.queries, self.error)
+ if not self.queries:
+            return False
+
+ # Get connection properties
+ self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties)
+ if not self.__conn_properties:
+ self.error('Connection properties are missing')
+ return False
+
+ # Create connection to the database
+ self.__connection, error = self.__connect()
+ if error:
+ self.error('Can\'t establish connection to MySQL: {error}'.format(error=error))
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Error: {error}'.format(error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error("_get_data() returned no data or type is not <dict>")
+ return False
+
+ def _get_raw_data(self, description=None):
+ """
+ Get raw data from MySQL server
+        :return: dict: query name -> fetchall() or (fetchall(), description)
+ """
+
+ if not self.__connection:
+ self.__connection, error = self.__connect()
+ if error:
+ return None
+
+ raw_data = dict()
+ queries = dict(self.queries)
+ try:
+ cursor = self.__connection.cursor()
+ for name, query in queries.items():
+ try:
+ cursor.execute(query)
+ except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
+ if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
+ cursor.close()
+ raise RuntimeError
+                    self.error('Removed query: {name}[{query}]. Error: {error}'.format(name=name,
+                                                                                       query=query,
+                                                                                       error=error))
+ self.queries.pop(name)
+ continue
+ else:
+ raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ cursor.close()
+ self.__connection.commit()
+ except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
+ self.__connection.close()
+ self.__connection = None
+ return None
+ else:
+ return raw_data or None
+
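+    # access-denied and unknown-column errors invalidate only the offending
+    # query (it is dropped in _get_raw_data()); any other OperationalError is
+    # treated as critical and tears down the connection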
+ @staticmethod
+ def __is_error_critical(err_class, err_text):
+ return err_class == MySQLdb.OperationalError and all(['denied' not in err_text,
+ 'Unknown column' not in err_text])
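+
+# A minimal usage sketch (hypothetical, for illustration only): a collector
+# subclasses MySQLService and supplies its queries; the dict keys become the
+# names used to index the _get_raw_data() result.
+#
+#   class ExampleMySQLCollector(MySQLService):
+#       def __init__(self, configuration=None, name=None):
+#           MySQLService.__init__(self, configuration=configuration, name=name)
+#           self.queries = dict(global_status='SHOW GLOBAL STATUS')
+#
+#       def _get_data(self):
+#           raw = self._get_raw_data()
+#           if raw is None:
+#               return None
+#           return dict((k, int(v)) for k, v in raw['global_status'] if str(v).isdigit())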
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
new file mode 100644
index 0000000..c7ab7f2
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from threading import Thread
+from time import sleep, time
+
+from third_party.monotonic import monotonic
+
+from bases.charts import Charts, ChartError, create_runtime_chart
+from bases.collection import OldVersionCompatibility, safe_print
+from bases.loggers import PythonDLimitedLogger
+
+RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \
+ 'SET run_time = {elapsed}\n' \
+ 'END\n'
+
+PENALTY_EVERY = 5
+MAX_PENALTY = 10 * 60 # 10 minutes
+
+
+class RuntimeCounters:
+ def __init__(self, configuration):
+ """
+ :param configuration: <dict>
+ """
+ self.update_every = int(configuration.pop('update_every'))
+ self.do_penalty = configuration.pop('penalty')
+
+ self.start_mono = 0
+ self.start_real = 0
+ self.retries = 0
+ self.penalty = 0
+ self.elapsed = 0
+ self.prev_update = 0
+
+ self.runs = 1
+
+ def calc_next(self):
+ self.start_mono = monotonic()
+ return self.start_mono - (self.start_mono % self.update_every) + self.update_every + self.penalty
+
+ def sleep_until_next(self):
+ next_time = self.calc_next()
+ while self.start_mono < next_time:
+ sleep(next_time - self.start_mono)
+ self.start_mono = monotonic()
+ self.start_real = time()
+
+ def handle_retries(self):
+ self.retries += 1
+ if self.do_penalty and self.retries % PENALTY_EVERY == 0:
+ self.penalty = round(min(self.retries * self.update_every / 2, MAX_PENALTY))
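+
+    # worked example (assuming update_every=1): after 5 consecutive failed
+    # runs the penalty becomes round(5 * 1 / 2) = 2 seconds, after 10 it is
+    # 5 seconds, and it is capped at MAX_PENALTY (600 seconds)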
+
+
+class SimpleService(Thread, PythonDLimitedLogger, OldVersionCompatibility, object):
+ """
+ Prototype of Service class.
+ Implemented basic functionality to run jobs by `python.d.plugin`
+ """
+ def __init__(self, configuration, name=''):
+ """
+ :param configuration: <dict>
+ :param name: <str>
+ """
+ Thread.__init__(self)
+ self.daemon = True
+ PythonDLimitedLogger.__init__(self)
+ OldVersionCompatibility.__init__(self)
+ self.configuration = configuration
+ self.order = list()
+ self.definitions = dict()
+
+ self.module_name = self.__module__
+ self.job_name = configuration.pop('job_name')
+ self.override_name = configuration.pop('override_name')
+ self.fake_name = None
+
+ self._runtime_counters = RuntimeCounters(configuration=configuration)
+ self.charts = Charts(job_name=self.actual_name,
+ priority=configuration.pop('priority'),
+ cleanup=configuration.pop('chart_cleanup'),
+ get_update_every=self.get_update_every,
+ module_name=self.module_name)
+
+ def __repr__(self):
+ return '<{cls_bases}: {name}>'.format(cls_bases=', '.join(c.__name__ for c in self.__class__.__bases__),
+ name=self.name)
+
+ @property
+ def name(self):
+ if self.job_name:
+ return '_'.join([self.module_name, self.override_name or self.job_name])
+ return self.module_name
+
+ def actual_name(self):
+ return self.fake_name or self.name
+
+ @property
+ def runs_counter(self):
+ return self._runtime_counters.runs
+
+ @property
+ def update_every(self):
+ return self._runtime_counters.update_every
+
+ @update_every.setter
+ def update_every(self, value):
+ """
+ :param value: <int>
+ :return:
+ """
+ self._runtime_counters.update_every = value
+
+ def get_update_every(self):
+ return self.update_every
+
+ def check(self):
+ """
+ check() prototype
+ :return: boolean
+ """
+ self.debug("job doesn't implement check() method. Using default which simply invokes get_data().")
+ data = self.get_data()
+ if data and isinstance(data, dict):
+ return True
+ self.debug('returned value is wrong: {0}'.format(data))
+ return False
+
+ @create_runtime_chart
+ def create(self):
+ for chart_name in self.order:
+ chart_config = self.definitions.get(chart_name)
+
+ if not chart_config:
+ self.debug("create() => [NOT ADDED] chart '{chart_name}' not in definitions. "
+ "Skipping it.".format(chart_name=chart_name))
+ continue
+
+ # create chart
+ chart_params = [chart_name] + chart_config['options']
+ try:
+ self.charts.add_chart(params=chart_params)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (chart '{chart}': {error})".format(chart=chart_name,
+ error=error))
+ continue
+
+ # add dimensions to chart
+ for dimension in chart_config['lines']:
+ try:
+ self.charts[chart_name].add_dimension(dimension)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (dimension '{dimension}': {error})".format(dimension=dimension,
+ error=error))
+ continue
+
+ # add variables to chart
+ if 'variables' in chart_config:
+ for variable in chart_config['variables']:
+ try:
+ self.charts[chart_name].add_variable(variable)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (variable '{var}': {error})".format(var=variable,
+ error=error))
+ continue
+
+ del self.order
+ del self.definitions
+
+ # True if job has at least 1 chart else False
+ return bool(self.charts)
+
+ def run(self):
+ """
+ Runs job in thread. Handles retries.
+ Exits when job failed or timed out.
+ :return: None
+ """
+ job = self._runtime_counters
+ self.debug('started, update frequency: {freq}'.format(freq=job.update_every))
+
+ while True:
+ job.sleep_until_next()
+
+ since = 0
+ if job.prev_update:
+ since = int((job.start_real - job.prev_update) * 1e6)
+
+ try:
+ updated = self.update(interval=since)
+ except Exception as error:
+ self.error('update() unhandled exception: {error}'.format(error=error))
+ updated = False
+
+ job.runs += 1
+
+ if not updated:
+ job.handle_retries()
+ else:
+ job.elapsed = int((monotonic() - job.start_mono) * 1e3)
+ job.prev_update = job.start_real
+ job.retries, job.penalty = 0, 0
+ safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
+ since_last=since,
+ elapsed=job.elapsed))
+ self.debug('update => [{status}] (elapsed time: {elapsed}, failed retries in a row: {retries})'.format(
+ status='OK' if updated else 'FAILED',
+ elapsed=job.elapsed if updated else '-',
+ retries=job.retries))
+
+ def update(self, interval):
+        """
+        :param interval: <int> microseconds since the previous successful update
+        :return: boolean
+        """
+ data = self.get_data()
+ if not data:
+ self.debug('get_data() returned no data')
+ return False
+ elif not isinstance(data, dict):
+ self.debug('get_data() returned incorrect type data')
+ return False
+
+ updated = False
+
+ for chart in self.charts:
+ if chart.flags.obsoleted:
+ if chart.can_be_updated(data):
+ chart.refresh()
+ else:
+ continue
+ elif self.charts.cleanup and chart.penalty >= self.charts.cleanup:
+ chart.obsolete()
+ self.error("chart '{0}' was suppressed due to non updating".format(chart.name))
+ continue
+
+ ok = chart.update(data, interval)
+ if ok:
+ updated = True
+
+ if not updated:
+ self.debug('none of the charts has been updated')
+
+ return updated
+
+ def get_data(self):
+ return self._get_data()
+
+ def _get_data(self):
+ raise NotImplementedError
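+
+# A minimal sketch of a concrete service (hypothetical, for illustration): a
+# subclass only needs to provide `order`, `definitions` and _get_data();
+# SimpleService.run() takes care of scheduling, retries and chart updates.
+#
+#   class ExampleService(SimpleService):
+#       def __init__(self, configuration=None, name=None):
+#           SimpleService.__init__(self, configuration=configuration, name=name)
+#           self.order = ['random']
+#           self.definitions = {
+#               'random': {
+#                   'options': [None, 'A random number', 'number', 'random', 'random.random', 'line'],
+#                   'lines': [['value', 'random', 'absolute']],
+#               },
+#           }
+#
+#       def _get_data(self):
+#           from random import randint
+#           return {'value': randint(0, 100)}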
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
new file mode 100644
index 0000000..f5e6380
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -0,0 +1,311 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+try:
+ import ssl
+except ImportError:
+ _TLS_SUPPORT = False
+else:
+ _TLS_SUPPORT = True
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class SocketService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ self._sock = None
+ self._keep_alive = False
+ self.host = 'localhost'
+ self.port = None
+ self.unix_socket = None
+ self.dgram_socket = False
+ self.request = ''
+ self.tls = False
+ self.cert = None
+ self.key = None
+ self.__socket_config = None
+ self.__empty_request = "".encode()
+ SimpleService.__init__(self, configuration=configuration, name=name)
+
+ def _socket_error(self, message=None):
+ if self.unix_socket is not None:
+ self.error('unix socket "{socket}": {message}'.format(socket=self.unix_socket,
+ message=message))
+ else:
+ if self.__socket_config is not None:
+ _, _, _, _, sa = self.__socket_config
+ self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0],
+ port=sa[1],
+ message=message))
+ else:
+ self.error('unknown socket: {0}'.format(message))
+
+ def _connect2socket(self, res=None):
+ """
+ Connect to a socket, passing the result of getaddrinfo()
+ :return: boolean
+ """
+ if res is None:
+ res = self.__socket_config
+ if res is None:
+            self.error("Cannot create socket to 'None'")
+ return False
+
+ af, sock_type, proto, _, sa = res
+ try:
+ self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self._sock = socket.socket(af, sock_type, proto)
+ except socket.error as error:
+ self.error('Failed to create socket "{address}", port {port}, error: {error}'.format(address=sa[0],
+ port=sa[1],
+ error=error))
+ self._sock = None
+ self.__socket_config = None
+ return False
+
+ if self.tls:
+ try:
+ self.debug('Encapsulating socket with TLS')
+ self._sock = ssl.wrap_socket(self._sock,
+ keyfile=self.key,
+ certfile=self.cert,
+ server_side=False,
+ cert_reqs=ssl.CERT_NONE,
+ ssl_version=ssl.PROTOCOL_TLS,
+ )
+ except (socket.error, ssl.SSLError) as error:
+ self.error('failed to wrap socket : {0}'.format(error))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
+ try:
+ self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self._sock.connect(sa)
+ except (socket.error, ssl.SSLError) as error:
+ self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0],
+ port=sa[1],
+ error=error))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
+ self.debug('connected to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self.__socket_config = res
+ return True
+
+ def _connect2unixsocket(self):
+ """
+ Connect to a unix socket, given its filename
+ :return: boolean
+ """
+ if self.unix_socket is None:
+ self.error("cannot connect to unix socket 'None'")
+ return False
+
+ try:
+ self.debug('attempting DGRAM unix socket "{0}"'.format(self.unix_socket))
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ self._sock.connect(self.unix_socket)
+ self.debug('connected DGRAM unix socket "{0}"'.format(self.unix_socket))
+ return True
+ except socket.error as error:
+ self.debug('Failed to connect DGRAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
+ error=error))
+
+ try:
+ self.debug('attempting STREAM unix socket "{0}"'.format(self.unix_socket))
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self._sock.connect(self.unix_socket)
+ self.debug('connected STREAM unix socket "{0}"'.format(self.unix_socket))
+ return True
+ except socket.error as error:
+ self.debug('Failed to connect STREAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
+ error=error))
+ self._sock = None
+ return False
+
+ def _connect(self):
+ """
+        Recreate the socket and connect to it, since sockets cannot be reused after closing.
+        Available configurations are IPv6, IPv4 or a UNIX socket.
+ :return:
+ """
+ try:
+ if self.unix_socket is not None:
+ self._connect2unixsocket()
+
+ else:
+ if self.__socket_config is not None:
+ self._connect2socket()
+ else:
+ if self.dgram_socket:
+ sock_type = socket.SOCK_DGRAM
+ else:
+ sock_type = socket.SOCK_STREAM
+ for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, sock_type):
+ if self._connect2socket(res):
+ break
+
+ except Exception:
+ self._sock = None
+ self.__socket_config = None
+
+ if self._sock is not None:
+ self._sock.setblocking(0)
+ self._sock.settimeout(5)
+ self.debug('set socket timeout to: {0}'.format(self._sock.gettimeout()))
+
+ def _disconnect(self):
+ """
+ Close socket connection
+ :return:
+ """
+ if self._sock is not None:
+ try:
+ self.debug('closing socket')
+ self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
+ self._sock.close()
+ except Exception as error:
+ self.error(error)
+ self._sock = None
+
+ def _send(self, request=None):
+ """
+ Send request.
+ :return: boolean
+ """
+ # Send request if it is needed
+ if self.request != self.__empty_request:
+ try:
+ self.debug('sending request: {0}'.format(request or self.request))
+ self._sock.send(request or self.request)
+ except Exception as error:
+ self._socket_error('error sending request: {0}'.format(error))
+ self._disconnect()
+ return False
+ return True
+
+ def _receive(self, raw=False):
+ """
+ Receive data from socket
+ :param raw: set `True` to return bytes
+ :type raw: bool
+ :return: decoded str or raw bytes
+ :rtype: str/bytes
+ """
+ data = "" if not raw else b""
+ while True:
+ self.debug('receiving response')
+ try:
+ buf = self._sock.recv(4096)
+ except Exception as error:
+ self._socket_error('failed to receive response: {0}'.format(error))
+ self._disconnect()
+ break
+
+ if buf is None or len(buf) == 0: # handle server disconnect
+ if data == "" or data == b"":
+ self._socket_error('unexpectedly disconnected')
+ else:
+ self.debug('server closed the connection')
+ self._disconnect()
+ break
+
+ self.debug('received data')
+ data += buf.decode('utf-8', 'ignore') if not raw else buf
+ if self._check_raw_data(data):
+ break
+
+ self.debug('final response: {0}'.format(data))
+ return data
+
+ def _get_raw_data(self, raw=False, request=None):
+ """
+ Get raw data with low-level "socket" module.
+ :param raw: set `True` to return bytes
+ :type raw: bool
+ :return: decoded data (str) or raw data (bytes)
+ :rtype: str/bytes
+ """
+ if self._sock is None:
+ self._connect()
+ if self._sock is None:
+ return None
+
+ # Send request if it is needed
+ if not self._send(request):
+ return None
+
+ data = self._receive(raw)
+
+ if not self._keep_alive:
+ self._disconnect()
+
+ return data
+
+ @staticmethod
+ def _check_raw_data(data):
+ """
+ Check if all data has been gathered from socket
+ :param data: str
+ :return: boolean
+ """
+ return bool(data)
+
+ def _parse_config(self):
+ """
+ Parse configuration data
+ :return: boolean
+ """
+ try:
+ self.unix_socket = str(self.configuration['socket'])
+ except (KeyError, TypeError):
+ self.debug('No unix socket specified. Trying TCP/IP socket.')
+ self.unix_socket = None
+ try:
+ self.host = str(self.configuration['host'])
+ except (KeyError, TypeError):
+ self.debug('No host specified. Using: "{0}"'.format(self.host))
+ try:
+ self.port = int(self.configuration['port'])
+ except (KeyError, TypeError):
+ self.debug('No port specified. Using: "{0}"'.format(self.port))
+
+ self.tls = bool(self.configuration.get('tls', self.tls))
+ if self.tls and not _TLS_SUPPORT:
+ self.warning('TLS requested but no TLS module found, disabling TLS support.')
+ self.tls = False
+ if _TLS_SUPPORT and not self.tls:
+ self.debug('No TLS preference specified, not using TLS.')
+
+ if self.tls and _TLS_SUPPORT:
+ self.key = self.configuration.get('tls_key_file')
+ self.cert = self.configuration.get('tls_cert_file')
+ if not self.cert:
+ # If there's not a valid certificate, clear the key too.
+ self.debug('No valid TLS client certificate configuration found.')
+ self.key = None
+ self.cert = None
+ elif not self.key:
+ # If a key isn't listed, the config may still be
+ # valid, because there may be a key attached to the
+ # certificate.
+ self.info('No TLS client key specified, assuming it\'s attached to the certificate.')
+ self.key = None
+
+ try:
+ self.request = str(self.configuration['request'])
+ except (KeyError, TypeError):
+ self.debug('No request specified. Using: "{0}"'.format(self.request))
+
+ self.request = self.request.encode()
+
+ def check(self):
+ self._parse_config()
+ return SimpleService.check(self)
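+
+# A minimal usage sketch (hypothetical, for illustration only): a collector
+# subclasses SocketService, sets the request to send, and parses the reply
+# returned by _get_raw_data().
+#
+#   class ExampleSocketCollector(SocketService):
+#       def __init__(self, configuration=None, name=None):
+#           SocketService.__init__(self, configuration=configuration, name=name)
+#           self.host = '127.0.0.1'  # assumed defaults
+#           self.port = 1234
+#           self.request = 'stats'
+#
+#       def _get_data(self):
+#           raw = self._get_raw_data()
+#           if raw is None:
+#               return None
+#           return {'reply_lines': len(raw.splitlines())}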
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
new file mode 100644
index 0000000..011efff
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import urllib3
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+try:
+ urllib3.disable_warnings()
+except AttributeError:
+ pass
+
+
+class UrlService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.proxy_user = self.configuration.get('proxy_user')
+ self.proxy_password = self.configuration.get('proxy_pass')
+ self.proxy_url = self.configuration.get('proxy_url')
+ self.method = self.configuration.get('method', 'GET')
+ self.header = self.configuration.get('header')
+ self.request_timeout = self.configuration.get('timeout', 1)
+ self.respect_retry_after_header = self.configuration.get('respect_retry_after_header')
+ self.tls_verify = self.configuration.get('tls_verify')
+ self.tls_ca_file = self.configuration.get('tls_ca_file')
+ self.tls_key_file = self.configuration.get('tls_key_file')
+ self.tls_cert_file = self.configuration.get('tls_cert_file')
+ self._manager = None
+
+ def __make_headers(self, **header_kw):
+ user = header_kw.get('user') or self.user
+ password = header_kw.get('pass') or self.password
+ proxy_user = header_kw.get('proxy_user') or self.proxy_user
+ proxy_password = header_kw.get('proxy_pass') or self.proxy_password
+ custom_header = header_kw.get('header') or self.header
+ header_params = dict(keep_alive=True)
+ proxy_header_params = dict()
+ if user and password:
+ header_params['basic_auth'] = '{user}:{password}'.format(user=user,
+ password=password)
+ if proxy_user and proxy_password:
+ proxy_header_params['proxy_basic_auth'] = '{user}:{password}'.format(user=proxy_user,
+ password=proxy_password)
+ try:
+ header, proxy_header = urllib3.make_headers(**header_params), urllib3.make_headers(**proxy_header_params)
+ except TypeError as error:
+ self.error('build_header() error: {error}'.format(error=error))
+ return None, None
+ else:
+ header.update(custom_header or dict())
+ return header, proxy_header
+
+ def _build_manager(self, **header_kw):
+ header, proxy_header = self.__make_headers(**header_kw)
+ if header is None or proxy_header is None:
+ return None
+ proxy_url = header_kw.get('proxy_url') or self.proxy_url
+ if proxy_url:
+ manager = urllib3.ProxyManager
+ params = dict(proxy_url=proxy_url, headers=header, proxy_headers=proxy_header)
+ else:
+ manager = urllib3.PoolManager
+ params = dict(headers=header)
+ tls_cert_file = self.tls_cert_file
+ if tls_cert_file:
+ params['cert_file'] = tls_cert_file
+ # NOTE: key_file is useless without cert_file, but
+ # cert_file may include the key as well.
+ tls_key_file = self.tls_key_file
+ if tls_key_file:
+ params['key_file'] = tls_key_file
+ tls_ca_file = self.tls_ca_file
+ if tls_ca_file:
+ params['ca_certs'] = tls_ca_file
+ try:
+ url = header_kw.get('url') or self.url
+ if url.startswith('https') and not self.tls_verify and not tls_ca_file:
+ params['ca_certs'] = None
+ return manager(assert_hostname=False, cert_reqs='CERT_NONE', **params)
+ return manager(**params)
+ except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error:
+ self.error('build_manager() error:', str(error))
+ return None
+
+ def _get_raw_data(self, url=None, manager=None):
+ """
+ Get raw data from http request
+ :return: str
+ """
+ try:
+ status, data = self._get_raw_data_with_status(url, manager)
+ except (urllib3.exceptions.HTTPError, TypeError, AttributeError) as error:
+ self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error))
+ return None
+
+ if status == 200:
+ return data
+ else:
+ self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=status))
+ return None
+
+ def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True):
+ """
+ Get status and response body content from http request. Does not catch exceptions
+ :return: int, str
+ """
+ url = url or self.url
+ manager = manager or self._manager
+ retry = urllib3.Retry(retries)
+ if hasattr(retry, 'respect_retry_after_header'):
+ retry.respect_retry_after_header = bool(self.respect_retry_after_header)
+
+ response = manager.request(
+ method=self.method,
+ url=url,
+ timeout=self.request_timeout,
+ retries=retry,
+ headers=manager.headers,
+ redirect=redirect,
+ )
+ if isinstance(response.data, str):
+ return response.status, response.data
+ return response.status, response.data.decode()
+
+ def check(self):
+ """
+ Format configuration data and try to connect to server
+ :return: boolean
+ """
+ if not (self.url and isinstance(self.url, str)):
+ self.error('URL is not defined or type is not <str>')
+ return False
+
+ self._manager = self._build_manager()
+ if not self._manager:
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Url: {url}. Error: {error}'.format(url=self.url, error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
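+
+# A minimal usage sketch (hypothetical, for illustration only): a collector
+# subclasses UrlService and turns the response body into a
+# {dimension_id: value} dict.
+#
+#   class ExampleUrlCollector(UrlService):
+#       def __init__(self, configuration=None, name=None):
+#           UrlService.__init__(self, configuration=configuration, name=name)
+#           self.url = self.configuration.get('url', 'http://127.0.0.1:8080/status')
+#
+#       def _get_data(self):
+#           raw = self._get_raw_data()
+#           if raw is None:
+#               return None
+#           # assumed body format: one "key value" pair per line
+#           return dict((k, int(v)) for k, v in (line.split() for line in raw.splitlines()))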
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
new file mode 100644
index 0000000..0a07190
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -0,0 +1,394 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.collection import safe_print
+
+CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden']
+DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
+VARIABLE_PARAMS = ['id', 'value']
+
+CHART_TYPES = ['line', 'area', 'stacked']
+DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row']
+
+CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
+CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
+ "{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
+CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
+ "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
+
+
+DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden}'\n"
+DIMENSION_SET = "SET '{id}' = {value}\n"
+
+CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n"
+
+RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time for {job_name}' 'ms' 'python.d' " \
+ "netdata.pythond_runtime line 145000 {update_every}\n" \
+ "DIMENSION run_time 'run time' absolute 1 1\n"
+
+
+def create_runtime_chart(func):
+ """
+ Calls a wrapped function, then prints runtime chart to stdout.
+
+ Used as a decorator for SimpleService.create() method.
+    The whole point of implementing the 'create runtime chart' functionality as a decorator is
+    to help users who re-implement create() in their own classes.
+
+ :param func: class method
+ :return:
+ """
+ def wrapper(*args, **kwargs):
+ self = args[0]
+ ok = func(*args, **kwargs)
+ if ok:
+ safe_print(RUNTIME_CHART_CREATE.format(job_name=self.name,
+ update_every=self._runtime_counters.update_every))
+ return ok
+ return wrapper
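+
+# For reference, a successful create() makes the decorator print something
+# like the following (job name and update_every are illustrative):
+#
+#   CHART netdata.runtime_example '' 'Execution time for example' 'ms' 'python.d' netdata.pythond_runtime line 145000 1
+#   DIMENSION run_time 'run time' absolute 1 1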
+
+
+class ChartError(Exception):
+ """Base-class for all exceptions raised by this module"""
+
+
+class DuplicateItemError(ChartError):
+ """Occurs when user re-adds a chart or a dimension that has already been added"""
+
+
+class ItemTypeError(ChartError):
+ """Occurs when user passes value of wrong type to Chart, Dimension or ChartVariable class"""
+
+
+class ItemValueError(ChartError):
+ """Occurs when user passes inappropriate value to Chart, Dimension or ChartVariable class"""
+
+
+class Charts:
+    """Represent a collection of charts
+
+    All charts are stored in a dict.
+    Each chart is an instance of the Chart class.
+    Charts must be added via the Charts.add_chart() method only"""
+ def __init__(self, job_name, priority, cleanup, get_update_every, module_name):
+        """
+        :param job_name: <bound method>
+        :param priority: <int>
+        :param cleanup: <int>
+        :param get_update_every: <bound method>
+        :param module_name: <str>
+        """
+ self.job_name = job_name
+ self.priority = priority
+ self.cleanup = cleanup
+ self.get_update_every = get_update_every
+ self.module_name = module_name
+ self.charts = dict()
+
+ def __len__(self):
+ return len(self.charts)
+
+ def __iter__(self):
+ return iter(self.charts.values())
+
+ def __repr__(self):
+ return 'Charts({0})'.format(self)
+
+ def __str__(self):
+ return str([chart for chart in self.charts])
+
+ def __contains__(self, item):
+ return item in self.charts
+
+ def __getitem__(self, item):
+ return self.charts[item]
+
+ def __delitem__(self, key):
+ del self.charts[key]
+
+ def __bool__(self):
+ return bool(self.charts)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def add_chart(self, params):
+ """
+ Create Chart instance and add it to the dict
+
+ Manually adds job name, priority and update_every to params.
+ :param params: <list>
+ :return:
+ """
+ params = [self.job_name()] + params
+ new_chart = Chart(params)
+
+ new_chart.params['update_every'] = self.get_update_every()
+ new_chart.params['priority'] = self.priority
+ new_chart.params['module_name'] = self.module_name
+
+ self.priority += 1
+ self.charts[new_chart.id] = new_chart
+
+ return new_chart
+
+ def active_charts(self):
+ return [chart.id for chart in self if not chart.flags.obsoleted]
+
+
+class Chart:
+ """Represent a chart"""
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'chart' must be a list type")
+ if not len(params) >= 8:
+ raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS))
+
+ self.params = dict(zip(CHART_PARAMS, (p or str() for p in params)))
+ self.name = '{type}.{id}'.format(type=self.params['type'],
+ id=self.params['id'])
+ if self.params.get('chart_type') not in CHART_TYPES:
+            self.params['chart_type'] = 'line'
+ hidden = str(self.params.get('hidden', ''))
+ self.params['hidden'] = 'hidden' if hidden == 'hidden' else ''
+
+ self.dimensions = list()
+ self.variables = set()
+ self.flags = ChartFlags()
+ self.penalty = 0
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __repr__(self):
+ return 'Chart({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __iter__(self):
+ return iter(self.dimensions)
+
+ def __contains__(self, item):
+ return item in [dimension.id for dimension in self.dimensions]
+
+ def add_variable(self, variable):
+ """
+ :param variable: <list>
+ :return:
+ """
+ self.variables.add(ChartVariable(variable))
+
+ def add_dimension(self, dimension):
+ """
+ :param dimension: <list>
+ :return:
+ """
+ dim = Dimension(dimension)
+
+ if dim.id in self:
+ raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id,
+ chart=self.name))
+ self.refresh()
+ self.dimensions.append(dim)
+ return dim
+
+ def hide_dimension(self, dimension_id, reverse=False):
+ if dimension_id in self:
+ idx = self.dimensions.index(dimension_id)
+ dimension = self.dimensions[idx]
+ dimension.params['hidden'] = 'hidden' if not reverse else str()
+ self.refresh()
+
+ def create(self):
+ """
+ :return:
+ """
+ chart = CHART_CREATE.format(**self.params)
+ dimensions = ''.join([dimension.create() for dimension in self.dimensions])
+ variables = ''.join([var.set(var.value) for var in self.variables if var])
+
+ self.flags.push = False
+ self.flags.created = True
+
+ safe_print(chart + dimensions + variables)
+
+ def can_be_updated(self, data):
+ for dim in self.dimensions:
+ if dim.get_value(data) is not None:
+ return True
+ return False
+
+ def update(self, data, interval):
+ updated_dimensions, updated_variables = str(), str()
+
+ for dim in self.dimensions:
+ value = dim.get_value(data)
+ if value is not None:
+ updated_dimensions += dim.set(value)
+
+ for var in self.variables:
+ value = var.get_value(data)
+ if value is not None:
+ updated_variables += var.set(value)
+
+ if updated_dimensions:
+ since_last = interval if self.flags.updated else 0
+
+ if self.flags.push:
+ self.create()
+
+ chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last)
+ safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n')
+
+ self.flags.updated = True
+ self.penalty = 0
+ else:
+ self.penalty += 1
+ self.flags.updated = False
+
+ return bool(updated_dimensions)
+
+ def obsolete(self):
+ self.flags.obsoleted = True
+ if self.flags.created:
+ safe_print(CHART_OBSOLETE.format(**self.params))
+
+ def refresh(self):
+ self.penalty = 0
+ self.flags.push = True
+ self.flags.obsoleted = False
+
+
+class Dimension:
+ """Represent a dimension"""
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'dimension' must be a list type")
+ if not params:
+ raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS))
+
+ self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params)))
+ self.params['name'] = self.params.get('name') or self.params['id']
+
+ if self.params.get('algorithm') not in DIMENSION_ALGORITHMS:
+ self.params['algorithm'] = 'absolute'
+ if not isinstance(self.params.get('multiplier'), int):
+ self.params['multiplier'] = 1
+ if not isinstance(self.params.get('divisor'), int):
+ self.params['divisor'] = 1
+ self.params.setdefault('hidden', '')
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __repr__(self):
+ return 'Dimension({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __eq__(self, other):
+ if not isinstance(other, Dimension):
+ return self.id == other
+ return self.id == other.id
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def create(self):
+ return DIMENSION_CREATE.format(**self.params)
+
+ def set(self, value):
+ """
+ :param value: <str>: must be a digit
+ :return:
+ """
+ return DIMENSION_SET.format(id=self.id,
+ value=value)
+
+ def get_value(self, data):
+ try:
+ return int(data[self.id])
+ except (KeyError, TypeError):
+ return None
+
+
+class ChartVariable:
+ """Represent a chart variable"""
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'variable' must be a list type")
+ if not params:
+ raise ItemValueError("invalid value for 'variable' must be: {0}".format(VARIABLE_PARAMS))
+
+ self.params = dict(zip(VARIABLE_PARAMS, params))
+ self.params.setdefault('value', None)
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __bool__(self):
+ return self.value is not None
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def __repr__(self):
+ return 'ChartVariable({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __eq__(self, other):
+ if isinstance(other, ChartVariable):
+ return self.id == other.id
+ return False
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def set(self, value):
+ return CHART_VARIABLE_SET.format(id=self.id,
+ value=value)
+
+ def get_value(self, data):
+ try:
+ return int(data[self.id])
+ except (KeyError, TypeError):
+ return None
+
+
+class ChartFlags:
+ def __init__(self):
+ self.push = True
+ self.created = False
+ self.updated = False
+ self.obsoleted = False
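+
+# A minimal sketch of the protocol text these classes emit on stdout (all
+# values illustrative): Chart.create() prints the CHART/DIMENSION lines,
+# Chart.update() prints a BEGIN/SET/END block every update cycle.
+#
+#   CHART example.random '' 'A random number' 'number' 'random' 'random.random' line 60000 1 '' 'python.d.plugin' 'example'
+#   DIMENSION 'value' 'random' absolute 1 1 ''
+#   BEGIN example.random 1000000
+#   SET 'value' = 42
+#   END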
diff --git a/collectors/python.d.plugin/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
new file mode 100644
index 0000000..479a3b6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/collection.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':')
+
+CHART_BEGIN = 'BEGIN {0} {1}\n'
+CHART_CREATE = "CHART {0} '{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n"
+DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n"
+DIMENSION_SET = "SET '{0}' = {1}\n"
+
+
+def setdefault_values(config, base_dict):
+ for key, value in base_dict.items():
+ config.setdefault(key, value)
+ return config
+
+
+def run_and_exit(func):
+ def wrapper(*args, **kwargs):
+ func(*args, **kwargs)
+ exit(1)
+ return wrapper
+
+
+def on_try_except_finally(on_except=(None, ), on_finally=(None, )):
+ except_func = on_except[0]
+ finally_func = on_finally[0]
+
+ def decorator(func):
+ def wrapper(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except Exception:
+ if except_func:
+ except_func(*on_except[1:])
+ finally:
+ if finally_func:
+ finally_func(*on_finally[1:])
+ return wrapper
+ return decorator
+
+
+def static_vars(**kwargs):
+ def decorate(func):
+ for k in kwargs:
+ setattr(func, k, kwargs[k])
+ return func
+ return decorate
+
+
+@on_try_except_finally(on_except=(exit, 1))
+def safe_print(*msg):
+ """
+ :param msg:
+ :return:
+ """
+ print(''.join(msg))
+
+
+def find_binary(binary):
+ """
+ :param binary: <str>
+ :return:
+ """
+ for directory in PATH:
+ binary_name = '/'.join([directory, binary])
+ if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK):
+ return binary_name
+ return None
+
+
+def read_last_line(f):
+ with open(f, 'rb') as opened:
+ opened.seek(-2, 2)
+ while opened.read(1) != b'\n':
+ opened.seek(-2, 1)
+ if opened.tell() == 0:
+ break
+ result = opened.readline()
+ return result.decode()
+
+
+class OldVersionCompatibility:
+
+ def __init__(self):
+ self._data_stream = str()
+
+ def begin(self, type_id, microseconds=0):
+ """
+ :param type_id: <str>
+ :param microseconds: <str> or <int>: must be a digit
+ :return:
+ """
+ self._data_stream += CHART_BEGIN.format(type_id, microseconds)
+
+ def set(self, dim_id, value):
+ """
+ :param dim_id: <str>
+ :param value: <int> or <str>: must be a digit
+ :return:
+ """
+ self._data_stream += DIMENSION_SET.format(dim_id, value)
+
+ def end(self):
+ self._data_stream += 'END\n'
+
+ def chart(self, type_id, name='', title='', units='', family='', category='', chart_type='line',
+ priority='', update_every=''):
+ """
+ :param type_id: <str>
+ :param name: <str>
+ :param title: <str>
+ :param units: <str>
+ :param family: <str>
+ :param category: <str>
+ :param chart_type: <str>
+ :param priority: <str> or <int>
+ :param update_every: <str> or <int>
+ :return:
+ """
+ self._data_stream += CHART_CREATE.format(type_id, name, title, units,
+ family, category, chart_type,
+ priority, update_every)
+
+ def dimension(self, dim_id, name=None, algorithm="absolute", multiplier=1, divisor=1, hidden=False):
+ """
+ :param dim_id: <str>
+ :param name: <str> or None
+ :param algorithm: <str>
+ :param multiplier: <str> or <int>: must be a digit
+ :param divisor: <str> or <int>: must be a digit
+ :param hidden: <str>: literally "hidden" or ""
+ :return:
+ """
+ self._data_stream += DIMENSION_CREATE.format(dim_id, name or dim_id, algorithm,
+ multiplier, divisor, hidden or str())
+
+ @on_try_except_finally(on_except=(exit, 1))
+ def commit(self):
+ print(self._data_stream)
+ self._data_stream = str()
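+
+# A minimal usage sketch of the legacy API above (values are illustrative):
+#
+#   svc = OldVersionCompatibility()
+#   svc.chart('example.random', title='A random number', units='number')
+#   svc.dimension('value')
+#   svc.begin('example.random')
+#   svc.set('value', 42)
+#   svc.end()
+#   svc.commit()  # prints the buffered protocol text; exits only if printing fails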
diff --git a/collectors/python.d.plugin/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py
new file mode 100644
index 0000000..9eb268c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/loaders.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import types
+
+from sys import version_info
+
+PY_VERSION = version_info[:2]
+
+try:
+ if PY_VERSION > (3, 1):
+ from pyyaml3 import SafeLoader as YamlSafeLoader
+ else:
+ from pyyaml2 import SafeLoader as YamlSafeLoader
+except ImportError:
+ from yaml import SafeLoader as YamlSafeLoader
+
+
+if PY_VERSION > (3, 1):
+ from importlib.machinery import SourceFileLoader
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+else:
+ from imp import load_source as SourceFileLoader
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+
+def dict_constructor(loader, node):
+ return OrderedDict(loader.construct_pairs(node))
+
+
+def safe_load(stream):
+ loader = YamlSafeLoader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+
+YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor)
+
+
+class YamlOrderedLoader:
+ @staticmethod
+ def load_config_from_file(file_name):
+ opened, loaded = False, False
+ try:
+ stream = open(file_name, 'r')
+ opened = True
+ loader = YamlSafeLoader(stream)
+ loaded = True
+ parsed = loader.get_single_data() or dict()
+ except Exception as error:
+ return dict(), error
+ else:
+ return parsed, None
+ finally:
+ if opened:
+ stream.close()
+ if loaded:
+ loader.dispose()
+
+
+class SourceLoader:
+ @staticmethod
+ def load_module_from_file(name, path):
+ try:
+ loaded = SourceFileLoader(name, path)
+ if isinstance(loaded, types.ModuleType):
+ return loaded, None
+ return loaded.load_module(), None
+ except Exception as error:
+ return None, error
+
+
+class ModuleAndConfigLoader(YamlOrderedLoader, SourceLoader):
+ pass
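+
+# A minimal usage sketch (paths are illustrative): the plugin pairs a module
+# file with its YAML configuration via this loader.
+#
+#   module, error = ModuleAndConfigLoader.load_module_from_file(
+#       'example', '/usr/libexec/netdata/python.d/example.chart.py')
+#   config, error = ModuleAndConfigLoader.load_config_from_file(
+#       '/etc/netdata/python.d/example.conf')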
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
new file mode 100644
index 0000000..098294d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import logging
+import traceback
+
+from sys import exc_info
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.collection import on_try_except_finally
+
+
+LOGGING_LEVELS = {'CRITICAL': 50,
+ 'ERROR': 40,
+ 'WARNING': 30,
+ 'INFO': 20,
+ 'DEBUG': 10,
+ 'NOTSET': 0}
+
+DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s'
+DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
+
+PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s: %(job_name)s: %(message)s'
+PYTHON_D_LOG_NAME = 'python.d'
+
+
+def limiter(log_max_count=30, allowed_in_seconds=60):
+ def on_decorator(func):
+
+ def on_call(*args):
+ current_time = args[0]._runtime_counters.start_mono
+ lc = args[0]._logger_counters
+
+ if lc.logged and lc.logged % log_max_count == 0:
+ if current_time - lc.time_to_compare <= allowed_in_seconds:
+ lc.dropped += 1
+ return
+ lc.time_to_compare = current_time
+
+ lc.logged += 1
+ func(*args)
+
+ return on_call
+ return on_decorator
+
+
+def add_traceback(func):
+ def on_call(*args):
+ self = args[0]
+
+ if not self.log_traceback:
+ func(*args)
+ else:
+ if exc_info()[0]:
+ func(*args)
+ func(self, traceback.format_exc())
+ else:
+ func(*args)
+
+ return on_call
+
+
+class LoggerCounters:
+ def __init__(self):
+ self.logged = 0
+ self.dropped = 0
+ self.time_to_compare = time()
+
+ def __repr__(self):
+        return 'LoggerCounters(logged: {logged}, dropped: {dropped})'.format(logged=self.logged,
+                                                                             dropped=self.dropped)
+
+
+class BaseLogger(object):
+ def __init__(self, logger_name, log_fmt=DEFAULT_LOG_LINE_FORMAT, date_fmt=DEFAULT_LOG_TIME_FORMAT,
+ handler=logging.StreamHandler):
+ """
+ :param logger_name: <str>
+ :param log_fmt: <str>
+ :param date_fmt: <str>
+ :param handler: <logging handler>
+ """
+ self.logger = logging.getLogger(logger_name)
+ if not self.has_handlers():
+ self.severity = 'INFO'
+ self.logger.addHandler(handler())
+ self.set_formatter(fmt=log_fmt, date_fmt=date_fmt)
+
+ def __repr__(self):
+        return '<Logger: {name}>'.format(name=self.logger.name)
+
+ def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT):
+ """
+ :param fmt: <str>
+ :param date_fmt: <str>
+ :return:
+ """
+ if self.has_handlers():
+ self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt))
+
+ def has_handlers(self):
+ return self.logger.handlers
+
+ @property
+ def severity(self):
+ return self.logger.getEffectiveLevel()
+
+ @severity.setter
+ def severity(self, level):
+ """
+ :param level: <str> or <int>
+ :return:
+ """
+ if level in LOGGING_LEVELS:
+ self.logger.setLevel(LOGGING_LEVELS[level])
+
+ def debug(self, *msg, **kwargs):
+ self.logger.debug(' '.join(map(str, msg)), **kwargs)
+
+ def info(self, *msg, **kwargs):
+ self.logger.info(' '.join(map(str, msg)), **kwargs)
+
+ def warning(self, *msg, **kwargs):
+ self.logger.warning(' '.join(map(str, msg)), **kwargs)
+
+ def error(self, *msg, **kwargs):
+ self.logger.error(' '.join(map(str, msg)), **kwargs)
+
+ def alert(self, *msg, **kwargs):
+ self.logger.critical(' '.join(map(str, msg)), **kwargs)
+
+ @on_try_except_finally(on_finally=(exit, 1))
+ def fatal(self, *msg, **kwargs):
+ self.logger.critical(' '.join(map(str, msg)), **kwargs)
+
+
+class PythonDLogger(object):
+ def __init__(self, logger_name=PYTHON_D_LOG_NAME, log_fmt=PYTHON_D_LOG_LINE_FORMAT):
+ """
+ :param logger_name: <str>
+ :param log_fmt: <str>
+ """
+ self.logger = BaseLogger(logger_name, log_fmt=log_fmt)
+ self.module_name = 'plugin'
+ self.job_name = 'main'
+ self._logger_counters = LoggerCounters()
+
+ _LOG_TRACEBACK = False
+
+ @property
+ def log_traceback(self):
+ return PythonDLogger._LOG_TRACEBACK
+
+ @log_traceback.setter
+ def log_traceback(self, value):
+ PythonDLogger._LOG_TRACEBACK = value
+
+ def debug(self, *msg):
+ self.logger.debug(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ def info(self, *msg):
+ self.logger.info(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ def warning(self, *msg):
+ self.logger.warning(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ @add_traceback
+ def error(self, *msg):
+ self.logger.error(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ @add_traceback
+ def alert(self, *msg):
+ self.logger.alert(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+ def fatal(self, *msg):
+ self.logger.fatal(*msg, extra={'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name})
+
+
+class PythonDLimitedLogger(PythonDLogger):
+ @limiter()
+ def info(self, *msg):
+ PythonDLogger.info(self, *msg)
+
+ @limiter()
+ def warning(self, *msg):
+ PythonDLogger.warning(self, *msg)
+
+ @limiter()
+ def error(self, *msg):
+ PythonDLogger.error(self, *msg)
+
+ @limiter()
+ def alert(self, *msg):
+ PythonDLogger.alert(self, *msg)
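+
+# Worked example of @limiter() above: with the defaults (log_max_count=30,
+# allowed_in_seconds=60), every 30th logged message triggers a window check;
+# if less than 60 seconds have passed since the last checkpoint the message
+# is dropped and counted, otherwise the checkpoint moves and logging resumes.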
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
new file mode 100644
index 0000000..4d560e4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
@@ -0,0 +1,316 @@
+# SPDX-License-Identifier: MIT
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+__version__ = '3.11'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
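+
+# Minimal usage sketch (document content is illustrative):
+#
+#   safe_load("update_every: 5\njobs:\n  - name: local\n")
+#   # -> {'update_every': 5, 'jobs': [{'name': 'local'}]}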
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
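+
+# Illustrative sketch of a YAMLObject subclass (class and tag names are
+# examples only); the metaclass registers the constructor and the
+# representer automatically:
+#
+#     class Monster(yaml.YAMLObject):
+#         yaml_tag = u'!Monster'
+#         def __init__(self, name, hp):
+#             self.name = name
+#             self.hp = hp
+#
+#     yaml.load(u'!Monster {name: Dragon, hp: 40}')  # -> a Monster instance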
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
new file mode 100644
index 0000000..6b41b80
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
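+# The Composer sits between the parser and the constructor: it consumes
+# the parser's event stream and builds a graph of ScalarNode,
+# SequenceNode and MappingNode objects, resolving anchors and aliases on
+# the way.  For example, composing u'a: &x [1]\nb: *x' yields a mapping
+# node whose two values are the same SequenceNode object.
+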
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
new file mode 100644
index 0000000..8ad1b90
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
@@ -0,0 +1,676 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
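+
+    # Constructors may be generators: such a constructor yields a bare
+    # object first, so that aliases inside a recursive structure can
+    # already refer to it, and fills the object in afterwards;
+    # construct_document drives the pending generators to completion.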
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if 'yaml_constructors' not in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if 'yaml_multi_constructors' not in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
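+
+    # This implements YAML 1.1 merge keys ('<<'); e.g. with
+    #     base: &b {x: 1}
+    #     child: {<<: *b, y: 2}
+    # the 'child' mapping constructs to {'x': 1, 'y': 2}.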
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
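+
+    # Note: YAML 1.1 booleans include yes/no and on/off, and matching is
+    # case-insensitive via value.lower(), so the scalar u'Off' constructs
+    # Python False.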
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
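+
+    # Worked example: the YAML 1.1 sexagesimal form u'190:20:30' yields
+    # 30*1 + 20*60 + 190*3600 == 685230.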
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
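+
+    # Worked example: u'2001-12-14 21:59:43.10 -5' constructs the naive
+    # UTC value datetime(2001, 12, 15, 2, 59, 43, 100000): the -5 hour
+    # zone offset is subtracted to normalize to UTC.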
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how the object is created; see make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
new file mode 100644
index 0000000..2858ab4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
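+
+# _yaml is the libyaml-based C extension; importing this module fails
+# (by design) when PyYAML was built without libyaml, and callers are
+# expected to fall back to the pure-Python Loader/Dumper classes.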
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
new file mode 100644
index 0000000..3685cbe
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
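+# Each dumper below is a plain mix-in composition: the Emitter writes
+# characters, the Serializer walks the node graph, the Representer turns
+# Python objects into nodes, and the Resolver picks implicit tags.  Only
+# the Representer/Resolver variant differs between BaseDumper,
+# SafeDumper and Dumper.
+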
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
new file mode 100644
index 0000000..9a460a0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
@@ -0,0 +1,1141 @@
+# SPDX-License-Identifier: MIT
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
+
+from error import YAMLError
+from events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for the next few events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
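+
+    # This look-ahead lets the emitter decide, before writing anything,
+    # whether a collection is empty (so it can be emitted inline as []
+    # or {}) and whether a mapping key is simple enough to be written
+    # without an explicit '?' indicator.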
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar as well.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special characters, are
+ # only allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
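+
+    # For example, u'foo: bar' contains ':' followed by a space, so the
+    # analysis reports allow_flow_plain=allow_block_plain=False and
+    # choose_scalar_style falls back to a quoted style for it.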
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
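+ # The writers below scan the text in runs of like characters (spaces,
+ # line breaks, everything else), flush a run when the character class
+ # changes, and fold long lines at run boundaries when `split` allows it.
+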
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
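+
+ # Illustration (not part of the original source): with the table above,
+ # the scalar u'a\nb' is emitted as "a\nb", i.e. the line break becomes a
+ # literal backslash-n escape inside the double quotes.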
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
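+ # Compute the block scalar header: an explicit indentation indicator
+ # when the text starts with a space or a break, '-' (strip) when the
+ # text has no trailing break, and '+' (keep) when the trailing break is
+ # preceded by another break (or the text is a single break).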
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
new file mode 100644
index 0000000..5466be7
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
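+ # Build a one-line excerpt around the error position, truncating each
+ # side to roughly half of `max_length` and marking the position with a
+ # caret on the following line.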
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
new file mode 100644
index 0000000..283452a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: MIT
+
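+# Events describe a YAML stream as a flat sequence: the Parser produces
+# them and the Emitter consumes them, so a stream can be processed without
+# building a node tree.
+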
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
new file mode 100644
index 0000000..1c19553
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
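+# The three loaders differ only in what they construct: BaseLoader does not
+# resolve implicit tags and builds only basic containers, SafeLoader
+# resolves the standard YAML tags and is safe for untrusted input, and
+# Loader can construct arbitrary Python objects, so it should only be used
+# on trusted documents.
+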
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
new file mode 100644
index 0000000..ed2a1b4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: MIT
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
new file mode 100644
index 0000000..97ba083
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
@@ -0,0 +1,590 @@
+# SPDX-License-Identifier: MIT
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
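+#
+# Illustration (not part of the original source): parsing the document
+#     - a
+#     - b: c
+# produces the event stream
+#     StreamStart DocumentStart SequenceStart Scalar(a)
+#     MappingStart Scalar(b) Scalar(c) MappingEnd SequenceEnd
+#     DocumentEnd StreamEnd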
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+ # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
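+ # An alias becomes an AliasEvent; otherwise parse the optional
+ # anchor/tag properties and dispatch on the next token to emit the
+ # matching scalar or collection-start event.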
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
new file mode 100644
index 0000000..8d42295
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
@@ -0,0 +1,191 @@
+# SPDX-License-Identifier: MIT
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(name, index, line, column, buffer, pointer)
+# It's just a record and its only use is producing nice error messages.
+# The parser does not use it for any other purpose.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(index=0) - return the character `index` positions ahead.
+# reader.prefix(length=1) - return the next `length` characters.
+# reader.forward(length=1) - move the current position `length` characters forward.
+# reader.index - the index of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
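+ # A lone '\r' counts as a line break here; for '\r\n' the break is
+ # counted when the '\n' is consumed.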
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
new file mode 100644
index 0000000..0a1404e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
@@ -0,0 +1,485 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import sys, copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
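+ # Dispatch order: an exact-type representer wins; otherwise walk the
+ # MRO looking for a multi-representer; the `None` entries act as
+ # catch-alls.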
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
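+ # Copy-on-write: give this class its own registry before mutating it,
+ # so registrations on a subclass do not leak into its parents.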
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
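+ # Try ASCII first, then UTF-8; byte strings that are valid as neither
+ # fall back to base64-encoded !!binary in literal block style.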
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
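+ # Compute float infinity portably: repeated squaring overflows to inf,
+ # at which point inf*inf == inf and the two reprs match.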
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
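+ # NaN test: NaN != NaN, and the second clause guards platforms where a
+ # broken NaN compares equal to everything.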
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(*args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
new file mode 100644
index 0000000..49922de
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+        # `path` is a pattern that is matched against the path from the
+        # root to the node that is being considered. Its elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+        # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
+
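
Aside (not part of the patch): each registration above passes a `first` list because `yaml_implicit_resolvers` is keyed by the first character of the plain scalar; `resolve()` only tries the regexps filed under that character (plus the `None` bucket). A hedged sketch with a made-up `!celsius` tag, using stock `yaml` as a stand-in for the vendored copy:

```python
import re
import yaml  # stand-in for the vendored copies

# Scalars like "21.5C" can only begin with a digit or '-', so those are
# the `first` characters under which this (tag, regexp) pair is filed.
yaml.add_implicit_resolver(
    '!celsius',
    re.compile(r'^-?\d+(\.\d+)?C$'),
    list('-0123456789'))

# A constructor is still needed to turn the tagged scalar into a value.
yaml.add_constructor(
    '!celsius',
    lambda loader, node: float(loader.construct_scalar(node)[:-1]))

print(yaml.load('temp: 21.5C', Loader=yaml.Loader))  # {'temp': 21.5}
```
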
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
new file mode 100644
index 0000000..971da61
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
@@ -0,0 +1,1458 @@
+# SPDX-License-Identifier: MIT
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+        # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+        # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+        # Examples of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+        # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+        # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+                    raise ScannerError("while scanning a simple key", key.mark,
+                        "could not find expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+        # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+                raise ScannerError("while scanning a simple key", key.mark,
+                    "could not find expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+        #            "invalid indentation or unclosed '[' or '{'",
+ # self.get_mark())
+
+        # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+        # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+        # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+        # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+        # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+            # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+        # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+        # Note that we loosen indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark their beginning and end. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                    "expected escape sequence of %d hexadecimal numbers, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in u',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == u':'
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+        # Note: we do not check if the URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+                        "expected URI escape sequence of 2 hexadecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+        #   '\u2029'    :   '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
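
Aside (not part of the patch): the token types listed at the top of this file can be observed directly through `scan()`, which drives this Scanner. Assuming the stock `yaml` API, which the vendored copy mirrors:

```python
import yaml  # stand-in for the vendored copies

# scan() yields tokens until STREAM-END; names mirror the list above.
for token in yaml.scan('key: [1, 2]'):
    print(type(token).__name__)
# StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken,
# ValueToken, FlowSequenceStartToken, ScalarToken, FlowEntryToken,
# ScalarToken, FlowSequenceEndToken, BlockEndToken, StreamEndToken
```
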
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
new file mode 100644
index 0000000..15fdbb0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
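
Aside (not part of the patch): `anchor_node` above marks any node reached twice, and `generate_anchor` then names it `id001`, `id002`, and so on. A short sketch, assuming the stock `yaml` API:

```python
import yaml  # stand-in for the vendored copies

# compose() builds a representation tree; the YAML alias makes both
# sequence items the same node object, so the serializer anchors it.
node = yaml.compose('- &x a\n- *x')
print(yaml.serialize(node))
# - &id001 a
# - *id001
```
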
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
new file mode 100644
index 0000000..c5c4fb1
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: MIT
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
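
Aside (not part of the patch): `Token.__repr__` above deliberately skips the `*_mark` attributes, so token dumps stay compact. For example, against the stock `yaml` API:

```python
import yaml  # stand-in for the vendored copies

tokens = list(yaml.scan('a: 1'))
print(tokens[3])  # ScalarToken(plain=True, style=None, value='a')
```
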
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
new file mode 100644
index 0000000..a884b33
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
@@ -0,0 +1,313 @@
+# SPDX-License-Identifier: MIT
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '3.11'
+try:
+ from .cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+import io
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ stream = io.StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
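+# Editor's note (illustrative usage sketch, not upstream code), assuming
+# this vendored package is importable as `pyyaml3`:
+#
+#     >>> import pyyaml3 as yaml
+#     >>> data = yaml.safe_load('a: 1\nb: [2, 3]\n')
+#     >>> data
+#     {'a': 1, 'b': [2, 3]}
+#     >>> print(yaml.safe_dump(data, default_flow_style=False), end='')
+#     a: 1
+#     b:
+#     - 2
+#     - 3
+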
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
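+# Editor's note (illustrative sketch, not upstream code): tag every plain
+# scalar matching a regexp and give it a constructor. The '!px' tag and
+# its lambda are hypothetical names chosen for this example:
+#
+#     >>> import re
+#     >>> add_implicit_resolver('!px', re.compile(r'^\d+px$'),
+#     ...                       first=list('0123456789'))
+#     >>> add_constructor('!px',
+#     ...     lambda loader, node: int(loader.construct_scalar(node)[:-2]))
+#     >>> load('width: 12px')
+#     {'width': 12}
+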
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
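+# Editor's note (illustrative sketch, not upstream code); '!point' is a
+# hypothetical tag:
+#
+#     >>> add_constructor('!point',
+#     ...     lambda loader, node: tuple(loader.construct_sequence(node)))
+#     >>> load('!point [1, 2]')
+#     (1, 2)
+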
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a multi-representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+
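+# Editor's note (illustrative sketch, not upstream code): subclassing
+# YAMLObject registers the tag automatically via the metaclass above;
+# 'Monster' is a hypothetical class:
+#
+#     >>> class Monster(YAMLObject):
+#     ...     yaml_tag = '!Monster'
+#     >>> m = load('!Monster {name: Dragon, hp: 100}')
+#     >>> m.name, m.hp
+#     ('Dragon', 100)
+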
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
new file mode 100644
index 0000000..c418bba
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer:
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor, event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor, self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
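+ # Editor's note (illustrative, not upstream code): the anchor
+ # bookkeeping above means that in a document such as
+ #
+ # first: &a [1, 2]
+ # second: *a
+ #
+ # both values compose to the very same SequenceNode object.
+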
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
new file mode 100644
index 0000000..ee09a7a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
@@ -0,0 +1,687 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from .error import *
+from .nodes import *
+
+import collections.abc, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor:
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ if not isinstance(key, collections.abc.Hashable):
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unhashable key", key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ if 'yaml_constructors' not in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if 'yaml_multi_constructors' not in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return super().construct_scalar(node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
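+ # Editor's note (illustrative): flatten_mapping implements YAML merge
+ # keys, e.g.
+ #
+ # base: &base {x: 1}
+ # child:
+ # <<: *base
+ # y: 2
+ #
+ # loads as {'base': {'x': 1}, 'child': {'x': 1, 'y': 2}}.
+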
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return super().construct_mapping(node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
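+ # Editor's note (illustrative): examples of the formats handled above:
+ # '0b1010' -> 10, '0x1F' -> 31, '010' -> 8 (octal), '12_345' -> 12345,
+ # and sexagesimal '1:30' -> 90.
+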
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ r'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
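+ # Editor's note (illustrative): scalars accepted by timestamp_regexp
+ # above include '2001-12-14' -> datetime.date(2001, 12, 14) and
+ # '2001-12-14 21:59:43.10 -5' -> datetime.datetime(2001, 12, 15, 2,
+ # 59, 43, 100000); the offset is folded into a naive UTC datetime.
+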
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag,
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ def construct_python_long(self, node):
+ return self.construct_yaml_int(node)
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name, exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if '.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = 'builtins'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name, exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r"
+ % (object_name, module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes',
+ Constructor.construct_python_bytes)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
new file mode 100644
index 0000000..e6c16d8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ BaseRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ BaseResolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
new file mode 100644
index 0000000..ba590c6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ BaseRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ BaseResolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
new file mode 100644
index 0000000..d4be65a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
@@ -0,0 +1,1138 @@
+# SPDX-License-Identifier: MIT
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
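+# Editor's note (illustrative): the document "a: 1" corresponds to the
+# event sequence StreamStart, DocumentStart, MappingStart, Scalar('a'),
+# Scalar('1'), MappingEnd, DocumentEnd, StreamEnd.
+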
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis:
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter:
+
+ DEFAULT_TAG_PREFIXES = {
+ '!' : '!',
+ 'tag:yaml.org,2002:' : '!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indentation character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = '\n'
+ if line_break in ['\r', '\n', '\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few more events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
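+ # Editor's note (illustrative): the lookahead above lets the emitter
+ # peek past a collection start, e.g. to detect an empty mapping
+ # (MappingStartEvent immediately followed by MappingEndEvent) before
+ # committing any output.
+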
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator('---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor('&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor('*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator('[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator('{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator('-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == '')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = '!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return '%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError("tag handle must start and end with '!': %r" % handle)
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch, handle))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+                data = ch.encode('utf-8')
+                # A bytes object yields ints when iterated under Python 3,
+                # so format each byte value directly instead of calling ord().
+                for byte in data:
+                    chunks.append('%%%02X' % byte)
+ if start < end:
+ chunks.append(prefix[start:end])
+ return ''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == '!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == '!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == '!' and handle != '!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+                data = ch.encode('utf-8')
+                # Same Python 3 fix as in prepare_tag_prefix(): bytes iterate
+                # as ints, so use the byte value directly instead of ord().
+                for byte in data:
+                    chunks.append('%%%02X' % byte)
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = ''.join(chunks)
+ if handle:
+ return '%s%s' % (handle, suffix_text)
+ else:
+ return '!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch, anchor))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith('---') or scalar.startswith('...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+        preceded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in '#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in '?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+                # Some indicators cannot appear inside a scalar either.
+ if ch in ',?[]{}':
+ flow_indicators = True
+ if ch == ':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+                if ch == '#' and preceded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in '\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+ if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == ' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in '\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+            preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+        # allowed for double-quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write('\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = ' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = ' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = '%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = '%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator('\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != ' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == '\'':
+ data = '\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator('\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ '\0': '0',
+ '\x07': 'a',
+ '\x08': 'b',
+ '\x09': 't',
+ '\x0A': 'n',
+ '\x0B': 'v',
+ '\x0C': 'f',
+ '\x0D': 'r',
+ '\x1B': 'e',
+ '\"': '\"',
+ '\\': '\\',
+ '\x85': 'N',
+ '\xA0': '_',
+ '\u2028': 'L',
+ '\u2029': 'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator('"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+ or not ('\x20' <= ch <= '\x7E'
+ or (self.allow_unicode
+ and ('\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= '\xFF':
+ data = '\\x%02X' % ord(ch)
+ elif ch <= '\uFFFF':
+ data = '\\u%04X' % ord(ch)
+ else:
+ data = '\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == ' ':
+ data = '\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ hints += str(self.best_indent)
+ if text[-1] not in '\n\x85\u2028\u2029':
+ hints += '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ hints += '+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('>'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != ' ' \
+ and text[start] == '\n':
+ self.write_line_break()
+ leading_space = (ch == ' ')
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ spaces = (ch == ' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('|'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in '\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = ' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
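The interplay between analyze_scalar() and choose_scalar_style() above is easiest to see through the dump API. A minimal sketch, assuming the stock `yaml` package (this vendored pyyaml3 copy exposes the same interface; the outputs shown in comments are illustrative):

    import yaml

    # No indicators and no line breaks: analyze_scalar() allows plain style.
    print(yaml.dump({'key': 'plain value'}))        # key: plain value

    # A leading '-' followed by a space is a block indicator, so plain style
    # is refused and choose_scalar_style() falls back to single quotes.
    print(yaml.dump({'key': '- not a list item'}))  # key: '- not a list item'

    # Line breaks mark the scalar as multiline; it is emitted as a quoted
    # scalar written across several lines rather than as a plain one.
    print(yaml.dump({'key': 'line\nbreak'}))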
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
new file mode 100644
index 0000000..5fec7d4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
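A Mark keeps enough of the source buffer to render the caret snippet seen in PyYAML tracebacks, and MarkedYAMLError stitches the context and problem marks together. A minimal sketch, assuming the stock `yaml` package:

    import yaml

    try:
        yaml.safe_load('key: [1, 2')       # unterminated flow sequence
    except yaml.MarkedYAMLError as exc:
        print(exc.context)                 # while parsing a flow sequence
        print(exc.problem)                 # expected ',' or ']', but got ...
        print(exc.problem_mark)            # Mark.__str__(): name, line, column,
                                           # plus the get_snippet() caret view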
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
new file mode 100644
index 0000000..283452a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: MIT
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
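Every state function in the parser below produces exactly one of these event objects, and yaml.parse() exposes that stream directly. A minimal sketch, assuming the stock `yaml` package:

    import yaml

    for event in yaml.parse('- a\n- {b: 1}\n'):
        print(type(event).__name__)

    # StreamStartEvent, DocumentStartEvent, SequenceStartEvent,
    # ScalarEvent (a), MappingStartEvent, ScalarEvent (b), ScalarEvent (1),
    # MappingEndEvent, SequenceEndEvent, DocumentEndEvent, StreamEndEvent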
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
new file mode 100644
index 0000000..7ef6cf8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
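Each loader is just a different stack of the same mixins; only the constructor/resolver pair distinguishes SafeLoader from Loader, which is what makes subclassing cheap. A short sketch, assuming the stock `yaml` package (DemoLoader and the !upper tag are hypothetical, for illustration only):

    import yaml

    class DemoLoader(yaml.SafeLoader):
        """Keeps the safe constructor stack; gains one extra tag below."""

    def construct_upper(loader, node):
        return loader.construct_scalar(node).upper()

    DemoLoader.add_constructor('!upper', construct_upper)

    assert yaml.load('a: [1, 2]', Loader=yaml.SafeLoader) == {'a': [1, 2]}
    assert yaml.load('!upper hello', Loader=DemoLoader) == 'HELLO'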
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
new file mode 100644
index 0000000..ed2a1b4
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: MIT
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
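yaml.compose() stops the pipeline at this node layer, which makes the three node kinds easy to inspect directly. A minimal sketch, assuming the stock `yaml` package:

    import yaml

    node = yaml.compose('a: [1, two]')
    print(node.id, node.tag)      # mapping tag:yaml.org,2002:map
    key, value = node.value[0]    # MappingNode.value holds (key, value) node pairs
    print(key.value, value.id)    # a sequence
    print([item.tag for item in value.value])
    # ['tag:yaml.org,2002:int', 'tag:yaml.org,2002:str']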
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
new file mode 100644
index 0000000..bcec7f9
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
@@ -0,0 +1,590 @@
+# SPDX-License-Identifier: MIT
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser:
+    # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ '!': '!',
+ '!!': 'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == 'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle,
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle,
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == '!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), '',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+    # Note that although the production rules for flow_sequence_entry and
+    # flow_mapping_entry are identical, their interpretations differ.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), '', mark, mark)
+
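check_event(), peek_event() and get_event() are the parser's whole public surface: each call lazily runs the current state function, and the state functions push continuations onto self.states like an explicit call stack. A minimal sketch driving it by hand, assuming the stock `yaml` package (SafeLoader mixes this Parser in):

    import yaml

    loader = yaml.SafeLoader('key: value')
    while loader.check_event():                 # runs the pending state lazily
        print(type(loader.get_event()).__name__)
    loader.dispose()                            # clear state self-references

    # StreamStartEvent, DocumentStartEvent, MappingStartEvent,
    # ScalarEvent (key), ScalarEvent (value), MappingEndEvent,
    # DocumentEndEvent, StreamEndEvent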
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
new file mode 100644
index 0000000..0a515fd
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: MIT
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(index=0) - return the character `index` positions ahead
+# reader.forward(length=1) - move the current position forward by `length` characters.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, bytes):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+    # - a file-like object with its `read` method returning `bytes`,
+    # - a file-like object with its `read` method returning `str`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, str):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+'\0'
+ elif isinstance(stream, bytes):
+ self.name = "<byte string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' \
+ or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=4096):
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
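The Reader can be exercised on its own: peek() looks ahead without consuming, forward() advances while tracking line and column, and get_mark() snapshots the position for error reporting. A minimal sketch, assuming the stock `yaml` package:

    from yaml.reader import Reader

    reader = Reader('first\nsecond')
    print(reader.peek())                # 'f' - look ahead, nothing consumed
    print(reader.prefix(5))             # 'first'
    reader.forward(6)                   # consume 'first' plus the line break
    print(reader.line, reader.column)   # 1 0  (both zero-based)
    print(reader.get_mark())            # in "<unicode string>", line 2, column 1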
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
new file mode 100644
index 0000000..756a18d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
@@ -0,0 +1,375 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, sys, copyreg, types, base64
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter:
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, bytes, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data):
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+        # NaN never compares equal to itself; the second test catches broken
+        # platforms where NaN compares equal to everything, including 0 and 1.
+        if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if '.' not in value and 'e' in value:
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+ SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = '%r' % data.real
+ elif data.real == 0.0:
+ data = '%rj' % data.imag
+ elif data.imag > 0:
+ data = '%r+%rj' % (data.real, data.imag)
+ else:
+ data = '%r%rj' % (data.real, data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = '%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ 'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = '%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
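+
+# Example (illustrative, not part of upstream PyYAML): dumping with this
+# Representer keeps Python-specific tags, e.g.
+# yaml.dump((1, 2)) -> '!!python/tuple\n- 1\n- 2\n'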
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
new file mode 100644
index 0000000..50945e0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if 'yaml_implicit_resolvers' not in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `new_path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `new_path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
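+ # Example (illustrative, not upstream code): resolve the value of a
+ # top-level 'ports' key as a sequence node:
+ # BaseResolver.add_path_resolver('tag:yaml.org,2002:seq',
+ # [(MappingNode, 'ports')], kind=list)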
+ if 'yaml_path_resolvers' not in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, str) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (str, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, str):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, str):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == '':
+ resolvers = self.yaml_implicit_resolvers.get('', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:bool',
+ re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:float',
+ re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:int',
+ re.compile(r'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:merge',
+ re.compile(r'^(?:<<)$'),
+ ['<'])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:null',
+ re.compile(r'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:timestamp',
+ re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:value',
+ re.compile(r'^(?:=)$'),
+ ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:yaml',
+ re.compile(r'^(?:!|&|\*)$'),
+ list('!&*'))
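+
+# Example (illustrative, not upstream code): with the table above,
+# Resolver().resolve(ScalarNode, 'yes', (True, False)) returns
+# 'tag:yaml.org,2002:bool', while
+# Resolver().resolve(ScalarNode, '12:34', (True, False)) returns
+# 'tag:yaml.org,2002:int' (sexagesimal notation).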
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
new file mode 100644
index 0000000..b55854e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
@@ -0,0 +1,1449 @@
+# SPDX-License-Identifier: MIT
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
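+# For example, scanning the document "a: 1" yields the token stream:
+# STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR('a'), VALUE,
+# SCALAR('1'), BLOCK-END, STREAM-END.
+#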
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey:
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner:
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Examples of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == '\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token" % ch,
+ self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '---' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '...' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+ and (ch == '-' or (not self.flow_level and ch in '?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == '\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r" % self.peek(),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch, self.get_mark())
+ length = 0
+ while '0' <= self.peek(length) <= '9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == ' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != ' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch, self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == '<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != '>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ elif ch in '\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = '!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = '!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in ' \t'
+ length = 0
+ while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == '\n' \
+ and leading_non_space and self.peek() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == '\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch, self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r" % ch,
+ self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ while self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we loosen indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation rules because " and '
+ # clearly mark their beginning and end. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '\"': '\"',
+ '\\': '\\',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {
+ 'x': 2,
+ 'u': 4,
+ 'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == '\'' and self.peek(1) == '\'':
+ chunks.append('\'')
+ self.forward(2)
+ elif (double and ch == '\'') or (not double and ch in '\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == '\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k)), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(chr(code))
+ self.forward(length)
+ elif ch in '\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch, self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in ' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == '\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in ' \t':
+ self.forward()
+ if self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == '#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in '\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == ':' and
+ self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in ',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == ':'
+ and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == '#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in ' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != '!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != '!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == '%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch, self.get_mark())
+ return ''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ codes = []
+ mark = self.get_mark()
+ while self.peek() == '%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
+ % self.peek(k), self.get_mark())
+ codes.append(int(self.prefix(2), 16))
+ self.forward(2)
+ try:
+ value = bytes(codes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in '\r\n\x85':
+ if self.prefix(2) == '\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.forward()
+ return ch
+ return ''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
new file mode 100644
index 0000000..1ba2f7f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = 'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
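+ # Two passes: anchor_node assigns an anchor to any node visited twice,
+ # then serialize_node emits an AliasEvent on each repeat visit.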
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
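+ # ANCHOR_TEMPLATE yields 'id001', 'id002', ... for successive nodes.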
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
new file mode 100644
index 0000000..c5c4fb1
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: MIT
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
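Scanner output is the quickest way to see these token classes in action; the standard PyYAML API, which this vendored module mirrors, exposes it as yaml.scan(). A small sketch:

    import yaml  # stand-in for the vendored pyyaml3 package

    for token in yaml.scan('key: [1, 2]'):
        print(token)
    # Roughly: StreamStartToken, BlockMappingStartToken, KeyToken,
    # ScalarToken(plain=True, value='key'), ValueToken,
    # FlowSequenceStartToken, ScalarToken(plain=True, value='1'),
    # FlowEntryToken, ScalarToken(plain=True, value='2'),
    # FlowSequenceEndToken, BlockEndToken, StreamEndToken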
diff --git a/collectors/python.d.plugin/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
new file mode 100644
index 0000000..ec21779
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
@@ -0,0 +1,515 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# client.py - Somewhat higher-level GUI_RPC API for BOINC core client
+#
+# Copyright (C) 2013 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
+# Copyright (C) 2017 Austin S. Hemmelgarn
+#
+# SPDX-License-Identifier: GPL-3.0
+
+# Based on client/boinc_cmd.cpp
+
+import hashlib
+import socket
+import sys
+import time
+from functools import total_ordering
+from xml.etree import ElementTree
+
+GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg"
+
+GUI_RPC_HOSTNAME = None # localhost
+GUI_RPC_PORT = 31416
+GUI_RPC_TIMEOUT = 1
+
+class Rpc(object):
+ ''' Class to perform GUI RPC calls to a BOINC core client.
+ Usage in a context manager ('with' block) is recommended to ensure
+ disconnect() is called. Using the same instance for all calls is also
+ recommended, so the same socket connection is reused.
+ '''
+ def __init__(self, hostname="", port=0, timeout=0, text_output=False):
+ self.hostname = hostname
+ self.port = port
+ self.timeout = timeout
+ self.sock = None
+ self.text_output = text_output
+
+ @property
+ def sockargs(self):
+ return (self.hostname, self.port, self.timeout)
+
+ def __enter__(self): self.connect(*self.sockargs); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self, hostname="", port=0, timeout=0):
+ ''' Connect to (hostname, port) with timeout in seconds.
+ Hostname defaults to None (localhost), and port to 31416
+ Calling multiple times will disconnect previous connection (if any),
+ and (re-)connect to host.
+ '''
+ if self.sock:
+ self.disconnect()
+
+ self.hostname = hostname or GUI_RPC_HOSTNAME
+ self.port = port or GUI_RPC_PORT
+ self.timeout = timeout or GUI_RPC_TIMEOUT
+
+ self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2])
+
+ def disconnect(self):
+ ''' Disconnect from host. Calling multiple times is OK (idempotent)
+ '''
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+
+ def call(self, request, text_output=None):
+ ''' Do an RPC call. Pack and send the XML request and return the
+ unpacked reply. request can be either plain XML text or an
+ xml.etree.ElementTree.Element object. Return ElementTree.Element
+ or XML text according to text_output flag.
+ Will auto-connect if not connected.
+ '''
+ if text_output is None:
+ text_output = self.text_output
+
+ if not self.sock:
+ self.connect(*self.sockargs)
+
+ if not isinstance(request, ElementTree.Element):
+ request = ElementTree.fromstring(request)
+
+ # pack request
+ end = '\003'
+ if sys.version_info[0] < 3:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end)
+ else:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode()
+
+ try:
+ self.sock.sendall(req)
+ except (socket.error, socket.herror, socket.gaierror, socket.timeout):
+ raise
+
+ req = ""
+ while True:
+ try:
+ buf = self.sock.recv(8192)
+ if not buf:
+ raise socket.error("No data from socket")
+ if sys.version_info[0] >= 3:
+ buf = buf.decode()
+ except socket.error:
+ raise
+ n = buf.find(end)
+ if n != -1: break
+ req += buf
+ req += buf[:n]
+
+ # unpack reply (remove root tag, i.e. the first and last lines)
+ req = '\n'.join(req.strip().rsplit('\n')[1:-1])
+
+ if text_output:
+ return req
+ else:
+ return ElementTree.fromstring(req)
+
+def setattrs_from_xml(obj, xml, attrfuncdict={}):
+ ''' Helper to set values for attributes of a class instance by mapping
+ matching tags from an XML file.
+ attrfuncdict is a dict of functions to customize the value data type of
+ each attribute. It falls back to simple int/float/bool/str detection
+ based on the values defined in __init__(). This would not be needed if
+ BOINC used a standard RPC protocol that includes the data type in the XML.
+ '''
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+ for e in list(xml):
+ if hasattr(obj, e.tag):
+ attr = getattr(obj, e.tag)
+ attrfunc = attrfuncdict.get(e.tag, None)
+ if attrfunc is None:
+ if isinstance(attr, bool): attrfunc = parse_bool
+ elif isinstance(attr, int): attrfunc = parse_int
+ elif isinstance(attr, float): attrfunc = parse_float
+ elif isinstance(attr, str): attrfunc = parse_str
+ elif isinstance(attr, list): attrfunc = parse_list
+ else: attrfunc = lambda x: x
+ setattr(obj, e.tag, attrfunc(e))
+ else:
+ pass
+ #print "class missing attribute '%s': %r" % (e.tag, obj)
+ return obj
+
+
+def parse_bool(e):
+ ''' Helper to convert ElementTree.Element.text to boolean.
+ Treat '<foo/>' (and '<foo>[[:blank:]]</foo>') as True
+ Treat '0' and 'false' as False
+ '''
+ if e.text is None:
+ return True
+ else:
+ return bool(e.text) and not e.text.strip().lower() in ('0', 'false')
+
+
+def parse_int(e):
+ ''' Helper to convert ElementTree.Element.text to integer.
+ Treat '<foo/>' (and '<foo></foo>') as 0
+ '''
+ # int(float()) allows casting to int a value expressed as float in XML
+ return 0 if e.text is None else int(float(e.text.strip()))
+
+
+def parse_float(e):
+ ''' Helper to convert ElementTree.Element.text to float. '''
+ return 0.0 if e.text is None else float(e.text.strip())
+
+
+def parse_str(e):
+ ''' Helper to convert ElementTree.Element.text to string. '''
+ return "" if e.text is None else e.text.strip()
+
+
+def parse_list(e):
+ ''' Helper to convert ElementTree.Element to list. For now, simply return
+ the list of root element's children
+ '''
+ return list(e)
+
+
+class Enum(object):
+ UNKNOWN = -1 # Not in original API
+
+ @classmethod
+ def name(cls, value):
+ ''' Quick-and-dirty fallback for getting the "name" of an enum item '''
+
+ # value as string, if it matches an enum attribute.
+ # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE)
+ if hasattr(cls, str(value)):
+ return cls.name(getattr(cls, value, None))
+
+ # value not handled in subclass name()
+ for k, v in cls.__dict__.items():
+ if v == value:
+ return k.lower().replace('_', ' ')
+
+ # value not found
+ return cls.name(Enum.UNKNOWN)
+
+
+class CpuSched(Enum):
+ ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state
+ "SCHEDULED" is synonymous with "executing" except when CPU throttling
+ is in use.
+ '''
+ UNINITIALIZED = 0
+ PREEMPTED = 1
+ SCHEDULED = 2
+
+
+class ResultState(Enum):
+ ''' Values of RESULT::state in client.
+ THESE MUST BE IN NUMERICAL ORDER
+ (because of the > comparison in RESULT::computing_done())
+ see html/inc/common_defs.inc
+ '''
+ NEW = 0
+ #// New result
+ FILES_DOWNLOADING = 1
+ #// Input files for result (WU, app version) are being downloaded
+ FILES_DOWNLOADED = 2
+ #// Files are downloaded, result can be (or is being) computed
+ COMPUTE_ERROR = 3
+ #// computation failed; no file upload
+ FILES_UPLOADING = 4
+ #// Output files for result are being uploaded
+ FILES_UPLOADED = 5
+ #// Files are uploaded, notify scheduling server at some point
+ ABORTED = 6
+ #// result was aborted
+ UPLOAD_FAILED = 7
+ #// some output file permanent failure
+
+
+class Process(Enum):
+ ''' values of ACTIVE_TASK::task_state '''
+ UNINITIALIZED = 0
+ #// process doesn't exist yet
+ EXECUTING = 1
+ #// process is running, as far as we know
+ SUSPENDED = 9
+ #// we've sent it a "suspend" message
+ ABORT_PENDING = 5
+ #// process exceeded limits; send "abort" message, waiting to exit
+ QUIT_PENDING = 8
+ #// we've sent it a "quit" message, waiting to exit
+ COPY_PENDING = 10
+ #// waiting for async file copies to finish
+
+
+class _Struct(object):
+ ''' base helper class with common methods for all classes derived from
+ BOINC's C++ structs
+ '''
+ @classmethod
+ def parse(cls, xml):
+ return setattrs_from_xml(cls(), xml)
+
+ def __str__(self, indent=0):
+ buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ buf += '{0}\t{1} [\n'.format('\t' * indent, attr)
+ for v in value: buf += '\t\t{0}\t\t,\n'.format(v)
+ buf += '\t]\n'
+ else:
+ buf += '{0}\t{1}\t{2}\n'.format('\t' * indent,
+ attr,
+ value.__str__(indent+2)
+ if isinstance(value, _Struct)
+ else repr(value))
+ return buf
+
+
+@total_ordering
+class VersionInfo(_Struct):
+ def __init__(self, major=0, minor=0, release=0):
+ self.major = major
+ self.minor = minor
+ self.release = release
+
+ @property
+ def _tuple(self):
+ return (self.major, self.minor, self.release)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self._tuple == other._tuple
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __gt__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self._tuple > other._tuple
+
+ def __str__(self):
+ return "{0}.{1}.{2}".format(self.major, self.minor, self.release)
+
+ def __repr__(self):
+ return "{0}{1}".format(self.__class__.__name__, self._tuple)
+
+
+class Result(_Struct):
+ ''' Also called "task" in some contexts '''
+ def __init__(self):
+ # Names and values follow lib/gui_rpc_client.h @ RESULT
+ # Order too, except when grouping contradicts client/result.cpp
+ # RESULT::write_gui(), then XML order is used.
+
+ self.name = ""
+ self.wu_name = ""
+ self.version_num = 0
+ #// identifies the app used
+ self.plan_class = ""
+ self.project_url = "" # from PROJECT.master_url
+ self.report_deadline = 0.0 # seconds since epoch
+ self.received_time = 0.0 # seconds since epoch
+ #// when we got this from server
+ self.ready_to_report = False
+ #// we're ready to report this result to the server;
+ #// either computation is done and all the files have been uploaded
+ #// or there was an error
+ self.got_server_ack = False
+ #// we've received the ack for this result from the server
+ self.final_cpu_time = 0.0
+ self.final_elapsed_time = 0.0
+ self.state = ResultState.NEW
+ self.estimated_cpu_time_remaining = 0.0
+ #// actually, estimated elapsed time remaining
+ self.exit_status = 0
+ #// return value from the application
+ self.suspended_via_gui = False
+ self.project_suspended_via_gui = False
+ self.edf_scheduled = False
+ #// temporary used to tell GUI that this result is deadline-scheduled
+ self.coproc_missing = False
+ #// a coproc needed by this job is missing
+ #// (e.g. because user removed their GPU board).
+ self.scheduler_wait = False
+ self.scheduler_wait_reason = ""
+ self.network_wait = False
+ self.resources = ""
+ #// textual description of resources used
+
+ #// the following defined if active
+ # XML is generated in client/app.cpp ACTIVE_TASK::write_gui()
+ self.active_task = False
+ self.active_task_state = Process.UNINITIALIZED
+ self.app_version_num = 0
+ self.slot = -1
+ self.pid = 0
+ self.scheduler_state = CpuSched.UNINITIALIZED
+ self.checkpoint_cpu_time = 0.0
+ self.current_cpu_time = 0.0
+ self.fraction_done = 0.0
+ self.elapsed_time = 0.0
+ self.swap_size = 0
+ self.working_set_size_smoothed = 0.0
+ self.too_large = False
+ self.needs_shmem = False
+ self.graphics_exec_path = ""
+ self.web_graphics_url = ""
+ self.remote_desktop_addr = ""
+ self.slot_path = ""
+ #// only present if graphics_exec_path is
+
+ # The following are not in original API, but are present in RPC XML reply
+ self.completed_time = 0.0
+ #// time when ready_to_report was set
+ self.report_immediately = False
+ self.working_set_size = 0
+ self.page_fault_rate = 0.0
+ #// derived by higher-level code
+
+ # The following are in API, but are NEVER in RPC XML reply. Go figure
+ self.signal = 0
+
+ self.app = None # APP*
+ self.wup = None # WORKUNIT*
+ self.project = None # PROJECT*
+ self.avp = None # APP_VERSION*
+
+ @classmethod
+ def parse(cls, xml):
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+
+ # parse main XML
+ result = super(Result, cls).parse(xml)
+
+ # parse '<active_task>' children
+ active_task = xml.find('active_task')
+ if active_task is None:
+ result.active_task = False # already the default after __init__()
+ else:
+ result.active_task = True # already the default after main parse
+ result = setattrs_from_xml(result, active_task)
+
+ #// if CPU time is nonzero but elapsed time is zero,
+ #// we must be talking to an old client.
+ #// Set elapsed = CPU
+ #// (easier to deal with this here than in the manager)
+ if result.current_cpu_time != 0 and result.elapsed_time == 0:
+ result.elapsed_time = result.current_cpu_time
+
+ if result.final_cpu_time != 0 and result.final_elapsed_time == 0:
+ result.final_elapsed_time = result.final_cpu_time
+
+ return result
+
+ def __str__(self):
+ buf = '{0}:\n'.format(self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if attr in ['received_time', 'report_deadline']:
+ value = time.ctime(value)
+ buf += '\t{0}\t{1}\n'.format(attr, value)
+ return buf
+
+
+class BoincClient(object):
+
+ def __init__(self, host="", port=0, passwd=None):
+ self.hostname = host
+ self.port = port
+ self.passwd = passwd
+ self.rpc = Rpc(text_output=False)
+ self.version = None
+ self.authorized = False
+
+ # Informative, not authoritative. Records status of *last* RPC call,
+ # but does not infer success about the *next* one.
+ # Thus, it should be read *after* an RPC call, not prior to one
+ self.connected = False
+
+ def __enter__(self): self.connect(); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self):
+ try:
+ self.rpc.connect(self.hostname, self.port)
+ self.connected = True
+ except socket.error:
+ self.connected = False
+ return
+ self.authorized = self.authorize(self.passwd)
+ self.version = self.exchange_versions()
+
+ def disconnect(self):
+ self.rpc.disconnect()
+
+ def authorize(self, password):
+ ''' Request authorization. If password is None and we are connecting
+ to localhost, try to read password from the local config file
+ GUI_RPC_PASSWD_FILE. If file can't be read (not found or no
+ permission to read), try to authorize with a blank password.
+ If authorization is requested and fails, all subsequent calls
+ will be refused with socket.error 'Connection reset by peer' (104).
+ Since most local calls do not require authorization, do not attempt
+ it if you're not sure about the password.
+ '''
+ if password is None and not self.hostname:
+ password = read_gui_rpc_password() or ""
+ nonce = self.rpc.call('<auth1/>').text
+ authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower()
+ reply = self.rpc.call('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(authhash))
+
+ if reply.tag == 'authorized':
+ return True
+ else:
+ return False
+
+ def exchange_versions(self):
+ ''' Return VersionInfo instance with core client version info '''
+ return VersionInfo.parse(self.rpc.call('<exchange_versions/>'))
+
+ def get_tasks(self):
+ ''' Same as get_results(active_only=False) '''
+ return self.get_results(False)
+
+ def get_results(self, active_only=False):
+ ''' Get a list of results.
+ Those that are in progress will have information such as CPU time
+ and fraction done. Each result includes a name;
+ Use CC_STATE::lookup_result() to find this result in the current static state;
+ if it's not there, call get_state() again.
+ '''
+ reply = self.rpc.call("<get_results><active_only>{0}</active_only></get_results>".format(1 if active_only else 0))
+ if reply.tag != 'results':
+ return []
+
+ results = []
+ for item in list(reply):
+ results.append(Result.parse(item))
+
+ return results
+
+
+def read_gui_rpc_password():
+ ''' Read password string from GUI_RPC_PASSWD_FILE, trim the trailing newline
+ (if any), and return it
+ '''
+ try:
+ with open(GUI_RPC_PASSWD_FILE, 'r') as f:
+ buf = f.read()
+ if buf.endswith('\n'): return buf[:-1] # trim trailing newline
+ else: return buf
+ except IOError:
+ # Permission denied or File not found.
+ pass
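+
For context, the boinc collector drives this module roughly as follows; a minimal sketch assuming a local BOINC client on the default port 31416 with a readable gui_rpc_auth.cfg:

    from boinc_client import BoincClient  # adjust the import to where the module lives

    with BoincClient() as client:  # defaults: localhost:31416, password from GUI_RPC_PASSWD_FILE
        if client.connected and client.authorized:
            print('BOINC client version:', client.version)
            for task in client.get_tasks():
                print(task.name, task.state, task.fraction_done)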
diff --git a/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
new file mode 100644
index 0000000..f873eac
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
@@ -0,0 +1,327 @@
+# SPDX-License-Identifier: LGPL-2.1
+"""
+@package sensors.py
+Python Bindings for libsensors3
+
+use the documentation of libsensors for the low level API.
+see example.py for high level API usage.
+
+@author: Pavel Rojtberg (http://www.rojtberg.net)
+@see: https://github.com/paroj/sensors.py
+@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1>
+"""
+
+from ctypes import *
+import ctypes.util
+
+_libc = cdll.LoadLibrary(ctypes.util.find_library("c"))
+# see https://github.com/paroj/sensors.py/issues/1
+_libc.free.argtypes = [c_void_p]
+
+_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors"))
+
+version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii")
+
+
+class SensorsError(Exception):
+ pass
+
+
+class ErrorWildcards(SensorsError):
+ pass
+
+
+class ErrorNoEntry(SensorsError):
+ pass
+
+
+class ErrorAccessRead(SensorsError, OSError):
+ pass
+
+
+class ErrorKernel(SensorsError, OSError):
+ pass
+
+
+class ErrorDivZero(SensorsError, ZeroDivisionError):
+ pass
+
+
+class ErrorChipName(SensorsError):
+ pass
+
+
+class ErrorBusName(SensorsError):
+ pass
+
+
+class ErrorParse(SensorsError):
+ pass
+
+
+class ErrorAccessWrite(SensorsError, OSError):
+ pass
+
+
+class ErrorIO(SensorsError, IOError):
+ pass
+
+
+class ErrorRecursion(SensorsError):
+ pass
+
+
+_ERR_MAP = {
+ 1: ErrorWildcards,
+ 2: ErrorNoEntry,
+ 3: ErrorAccessRead,
+ 4: ErrorKernel,
+ 5: ErrorDivZero,
+ 6: ErrorChipName,
+ 7: ErrorBusName,
+ 8: ErrorParse,
+ 9: ErrorAccessWrite,
+ 10: ErrorIO,
+ 11: ErrorRecursion
+}
+
+
+def raise_sensor_error(errno, message=''):
+ raise _ERR_MAP[abs(errno)](message)
+
+
+class bus_id(Structure):
+ _fields_ = [("type", c_short),
+ ("nr", c_short)]
+
+
+class chip_name(Structure):
+ _fields_ = [("prefix", c_char_p),
+ ("bus", bus_id),
+ ("addr", c_int),
+ ("path", c_char_p)]
+
+
+class feature(Structure):
+ _fields_ = [("name", c_char_p),
+ ("number", c_int),
+ ("type", c_int)]
+
+ # sensors_feature_type
+ IN = 0x00
+ FAN = 0x01
+ TEMP = 0x02
+ POWER = 0x03
+ ENERGY = 0x04
+ CURR = 0x05
+ HUMIDITY = 0x06
+ MAX_MAIN = 0x7
+ VID = 0x10
+ INTRUSION = 0x11
+ MAX_OTHER = 0x12
+ BEEP_ENABLE = 0x18
+
+
+class subfeature(Structure):
+ _fields_ = [("name", c_char_p),
+ ("number", c_int),
+ ("type", c_int),
+ ("mapping", c_int),
+ ("flags", c_uint)]
+
+
+_hdl.sensors_get_detected_chips.restype = POINTER(chip_name)
+_hdl.sensors_get_features.restype = POINTER(feature)
+_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature)
+_hdl.sensors_get_label.restype = c_void_p # return pointer instead of str so we can free it
+_hdl.sensors_get_adapter_name.restype = c_char_p # docs do not say whether to free this or not
+_hdl.sensors_strerror.restype = c_char_p
+
+### RAW API ###
+MODE_R = 1
+MODE_W = 2
+COMPUTE_MAPPING = 4
+
+
+def init(cfg_file=None):
+ file = _libc.fopen(cfg_file.encode("utf-8"), b"r") if cfg_file is not None else None  # ctypes needs a bytes mode string on Python 3
+
+ result = _hdl.sensors_init(file)
+ if result != 0:
+ raise_sensor_error(result, "sensors_init failed")
+
+ if file is not None:
+ _libc.fclose(file)
+
+
+def cleanup():
+ _hdl.sensors_cleanup()
+
+
+def parse_chip_name(orig_name):
+ ret = chip_name()
+ err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), byref(ret))
+
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+ return ret
+
+
+def strerror(errnum):
+ return _hdl.sensors_strerror(errnum).decode("utf-8")
+
+
+def free_chip_name(chip):
+ _hdl.sensors_free_chip_name(byref(chip))
+
+
+def get_detected_chips(match, nr):
+ """
+ @return: (chip, next nr to query)
+ """
+ _nr = c_int(nr)
+
+ if match is not None:
+ match = byref(match)
+
+ chip = _hdl.sensors_get_detected_chips(match, byref(_nr))
+ chip = chip.contents if bool(chip) else None
+ return chip, _nr.value
+
+
+def chip_snprintf_name(chip, buffer_size=200):
+ """
+ @param buffer_size defaults to the size used in the sensors utility
+ """
+ ret = create_string_buffer(buffer_size)
+ err = _hdl.sensors_snprintf_chip_name(ret, buffer_size, byref(chip))
+
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+ return ret.value.decode("utf-8")
+
+
+def do_chip_sets(chip):
+ """
+ @attention this function was not tested
+ """
+ err = _hdl.sensors_do_chip_sets(byref(chip))
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+
+def get_adapter_name(bus):
+ return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8")
+
+
+def get_features(chip, nr):
+ """
+ @return: (feature, next nr to query)
+ """
+ _nr = c_int(nr)
+ feature = _hdl.sensors_get_features(byref(chip), byref(_nr))
+ feature = feature.contents if bool(feature) else None
+ return feature, _nr.value
+
+
+def get_label(chip, feature):
+ ptr = _hdl.sensors_get_label(byref(chip), byref(feature))
+ val = cast(ptr, c_char_p).value.decode("utf-8")
+ _libc.free(ptr)
+ return val
+
+
+def get_all_subfeatures(chip, feature, nr):
+ """
+ @return: (subfeature, next nr to query)
+ """
+ _nr = c_int(nr)
+ subfeature = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(_nr))
+ subfeature = subfeature.contents if bool(subfeature) else None
+ return subfeature, _nr.value
+
+
+def get_value(chip, subfeature_nr):
+ val = c_double()
+ err = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(val))
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+ return val.value
+
+
+def set_value(chip, subfeature_nr, value):
+ """
+ @attention this function was not tested
+ """
+ val = c_double(value)
+ err = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val))
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+
+### Convenience API ###
+class ChipIterator:
+ def __init__(self, match=None):
+ self.match = parse_chip_name(match) if match is not None else None
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ chip, self.nr = get_detected_chips(self.match, self.nr)
+
+ if chip is None:
+ raise StopIteration
+
+ return chip
+
+ def __del__(self):
+ if self.match is not None:
+ free_chip_name(self.match)
+
+ def next(self): # python2 compatibility
+ return self.__next__()
+
+
+class FeatureIterator:
+ def __init__(self, chip):
+ self.chip = chip
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ feature, self.nr = get_features(self.chip, self.nr)
+
+ if feature is None:
+ raise StopIteration
+
+ return feature
+
+ def next(self): # python2 compatibility
+ return self.__next__()
+
+
+class SubFeatureIterator:
+ def __init__(self, chip, feature):
+ self.chip = chip
+ self.feature = feature
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr)
+
+ if subfeature is None:
+ raise StopIteration
+
+ return subfeature
+
+ def next(self): # python2 compatibility
+ return self.__next__()
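+
Putting the convenience iterators together, a full sweep over every readable sensor value looks like the following sketch (assumes libsensors is installed; chip and feature names depend on the host):

    import lm_sensors as sensors  # adjust the import to where the module lives

    sensors.init()  # optionally pass the path of a sensors3.conf
    try:
        for chip in sensors.ChipIterator():
            print(sensors.chip_snprintf_name(chip))
            for feature in sensors.FeatureIterator(chip):
                label = sensors.get_label(chip, feature)
                for sub in sensors.SubFeatureIterator(chip, feature):
                    try:
                        value = sensors.get_value(chip, sub.number)
                    except sensors.SensorsError:
                        continue  # e.g. ErrorAccessRead on an unreadable subfeature
                    print(' ', label, sub.name.decode('utf-8'), value)
    finally:
        sensors.cleanup()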
diff --git a/collectors/python.d.plugin/python_modules/third_party/mcrcon.py b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
new file mode 100644
index 0000000..a65a304
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
@@ -0,0 +1,74 @@
+# Minecraft Remote Console module.
+#
+# Copyright (C) 2015 Barnaby Gale
+#
+# SPDX-License-Identifier: MIT
+
+import socket
+import select
+import struct
+import time
+
+
+class MCRconException(Exception):
+ pass
+
+
+class MCRcon(object):
+ socket = None
+
+ def connect(self, host, port, password):
+ if self.socket is not None:
+ raise MCRconException("Already connected")
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.socket.settimeout(0.9)
+ self.socket.connect((host, port))
+ self.send(3, password)
+
+ def disconnect(self):
+ if self.socket is None:
+ raise MCRconException("Already disconnected")
+ self.socket.close()
+ self.socket = None
+
+ def read(self, length):
+ data = b""
+ while len(data) < length:
+ data += self.socket.recv(length - len(data))
+ return data
+
+ def send(self, out_type, out_data):
+ if self.socket is None:
+ raise MCRconException("Must connect before sending data")
+
+ # Send a request packet
+ out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
+ out_length = struct.pack('<i', len(out_payload))
+ self.socket.send(out_length + out_payload)
+
+ # Read response packets
+ in_data = ""
+ while True:
+ # Read a packet
+ in_length, = struct.unpack('<i', self.read(4))
+ in_payload = self.read(in_length)
+ in_id = struct.unpack('<ii', in_payload[:8])[0]  # unpack returns a tuple; the first int is the request id
+ in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
+
+ # Sanity checks
+ if in_padding != b'\x00\x00':
+ raise MCRconException("Incorrect padding")
+ if in_id == -1:
+ raise MCRconException("Login failed")
+
+ # Record the response
+ in_data += in_data_partial.decode('utf8')
+
+ # If there's nothing more to receive, return the response
+ if len(select.select([self.socket], [], [], 0)[0]) == 0:
+ return in_data
+
+ def command(self, command):
+ result = self.send(2, command)
+ time.sleep(0.003) # MC-72390 workaround
+ return result
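+
Usage is a connect/command/disconnect cycle; a short sketch (host, port and password are placeholders, 25575 being the conventional Minecraft RCON port):

    from mcrcon import MCRcon  # adjust the import to where the module lives

    rcon = MCRcon()
    rcon.connect('localhost', 25575, 'rcon-password')  # placeholder credentials
    try:
        print(rcon.command('list'))  # the server's reply to the 'list' command
    finally:
        rcon.disconnect()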
diff --git a/collectors/python.d.plugin/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
new file mode 100644
index 0000000..da04bb8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: Apache-2.0
+"""
+ monotonic
+ ~~~~~~~~~
+
+ This module provides a ``monotonic()`` function which returns the
+ value (in fractional seconds) of a clock which never goes backwards.
+
+ On Python 3.3 or newer, ``monotonic`` will be an alias of
+ ``time.monotonic`` from the standard library. On older versions,
+ it will fall back to an equivalent implementation:
+
+ +-------------+----------------------------------------+
+ | Linux, BSD | ``clock_gettime(3)`` |
+ +-------------+----------------------------------------+
+ | Windows | ``GetTickCount`` or ``GetTickCount64`` |
+ +-------------+----------------------------------------+
+ | OS X | ``mach_absolute_time`` |
+ +-------------+----------------------------------------+
+
+ If no suitable implementation exists for the current platform,
+ attempting to import this module (or to import from it) will
+ cause a ``RuntimeError`` exception to be raised.
+
+
+ Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+"""
+import time
+
+
+__all__ = ('monotonic',)
+
+
+try:
+ monotonic = time.monotonic
+except AttributeError:
+ import ctypes
+ import ctypes.util
+ import os
+ import sys
+ import threading
+ try:
+ if sys.platform == 'darwin': # OS X, iOS
+ # See Technical Q&A QA1398 of the Mac Developer Library:
+ # <https://developer.apple.com/library/mac/qa/qa1398/>
+ libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
+
+ class mach_timebase_info_data_t(ctypes.Structure):
+ """System timebase info. Defined in <mach/mach_time.h>."""
+ _fields_ = (('numer', ctypes.c_uint32),
+ ('denom', ctypes.c_uint32))
+
+ mach_absolute_time = libc.mach_absolute_time
+ mach_absolute_time.restype = ctypes.c_uint64
+
+ timebase = mach_timebase_info_data_t()
+ libc.mach_timebase_info(ctypes.byref(timebase))
+ ticks_per_second = timebase.numer / timebase.denom * 1.0e9
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return mach_absolute_time() / ticks_per_second
+
+ elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
+ if sys.platform.startswith('cygwin'):
+ # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
+ # version 1.7.6. Using raw WinAPI for maximum version compatibility.
+
+ # Ugly hack using the wrong calling convention (in 32-bit mode)
+ # because ctypes has no windll under cygwin (and it also seems that
+ # the code letting you select stdcall in _ctypes doesn't exist under
+ # the preprocessor definitions relevant to cygwin).
+ # This is 'safe' because:
+ # 1. The ABI of GetTickCount and GetTickCount64 is identical for
+ # both calling conventions because they both have no parameters.
+ # 2. libffi masks the problem because after making the call it doesn't
+ # touch anything through esp and epilogue code restores a correct
+ # esp from ebp afterwards.
+ try:
+ kernel32 = ctypes.cdll.kernel32
+ except OSError: # 'No such file or directory'
+ kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
+ else:
+ kernel32 = ctypes.windll.kernel32
+
+ GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
+ if GetTickCount64:
+ # Windows Vista / Windows Server 2008 or newer.
+ GetTickCount64.restype = ctypes.c_ulonglong
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return GetTickCount64() / 1000.0
+
+ else:
+ # Before Windows Vista.
+ GetTickCount = kernel32.GetTickCount
+ GetTickCount.restype = ctypes.c_uint32
+
+ get_tick_count_lock = threading.Lock()
+ get_tick_count_last_sample = 0
+ get_tick_count_wraparounds = 0
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ global get_tick_count_last_sample
+ global get_tick_count_wraparounds
+
+ with get_tick_count_lock:
+ current_sample = GetTickCount()
+ if current_sample < get_tick_count_last_sample:
+ get_tick_count_wraparounds += 1
+ get_tick_count_last_sample = current_sample
+
+ final_milliseconds = get_tick_count_wraparounds << 32
+ final_milliseconds += get_tick_count_last_sample
+ return final_milliseconds / 1000.0
+
+ else:
+ try:
+ clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
+ use_errno=True).clock_gettime
+ except Exception:
+ clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
+ use_errno=True).clock_gettime
+
+ class timespec(ctypes.Structure):
+ """Time specification, as described in clock_gettime(3)."""
+ _fields_ = (('tv_sec', ctypes.c_long),
+ ('tv_nsec', ctypes.c_long))
+
+ if sys.platform.startswith('linux'):
+ CLOCK_MONOTONIC = 1
+ elif sys.platform.startswith('freebsd'):
+ CLOCK_MONOTONIC = 4
+ elif sys.platform.startswith('sunos5'):
+ CLOCK_MONOTONIC = 4
+ elif 'bsd' in sys.platform:
+ CLOCK_MONOTONIC = 3
+ elif sys.platform.startswith('aix'):
+ CLOCK_MONOTONIC = ctypes.c_longlong(10)
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ ts = timespec()
+ if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
+ errno = ctypes.get_errno()
+ raise OSError(errno, os.strerror(errno))
+ return ts.tv_sec + ts.tv_nsec / 1.0e9
+
+ # Perform a sanity-check.
+ if monotonic() - monotonic() > 0:
+ raise ValueError('monotonic() is not monotonic!')
+
+ except Exception as e:
+ raise RuntimeError('no suitable implementation for this system: ' + repr(e))
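+
Whichever backend gets picked, the exported monotonic() is used the same way: take two samples and subtract. The difference stays valid even if the wall clock is stepped by NTP or an administrator, which is what makes it suitable for timing collection loops. A tiny sketch:

    import time
    from monotonic import monotonic  # adjust the import to where the module lives

    start = monotonic()
    time.sleep(0.5)                # stand-in for real work
    elapsed = monotonic() - start  # fractional seconds, never negative
    print('elapsed: %.3fs' % elapsed)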
diff --git a/collectors/python.d.plugin/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
new file mode 100644
index 0000000..589401b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2009 Raymond Hettinger
+#
+# SPDX-License-Identifier: MIT
+
+from UserDict import DictMixin
+
+
+class OrderedDict(dict, DictMixin):
+
+ def __init__(self, *args, **kwds):
+ if len(args) > 1:
+ raise TypeError('expected at most 1 argument, got %d' % len(args))
+ try:
+ self.__end
+ except AttributeError:
+ self.clear()
+ self.update(*args, **kwds)
+
+ def clear(self):
+ self.__end = end = []
+ end += [None, end, end] # sentinel node for doubly linked list
+ self.__map = {} # key --> [key, prev, next]
+ dict.clear(self)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ end = self.__end
+ curr = end[1]
+ curr[2] = end[1] = self.__map[key] = [key, curr, end]
+ dict.__setitem__(self, key, value)
+
+ def __delitem__(self, key):
+ dict.__delitem__(self, key)
+ key, prev, next = self.__map.pop(key)
+ prev[2] = next
+ next[1] = prev
+
+ def __iter__(self):
+ end = self.__end
+ curr = end[2]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[2]
+
+ def __reversed__(self):
+ end = self.__end
+ curr = end[1]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[1]
+
+ def popitem(self, last=True):
+ if not self:
+ raise KeyError('dictionary is empty')
+ if last:
+ key = reversed(self).next()
+ else:
+ key = iter(self).next()
+ value = self.pop(key)
+ return key, value
+
+ def __reduce__(self):
+ items = [[k, self[k]] for k in self]
+ tmp = self.__map, self.__end
+ del self.__map, self.__end
+ inst_dict = vars(self).copy()
+ self.__map, self.__end = tmp
+ if inst_dict:
+ return self.__class__, (items,), inst_dict
+ return self.__class__, (items,)
+
+ def keys(self):
+ return list(self)
+
+ setdefault = DictMixin.setdefault
+ update = DictMixin.update
+ pop = DictMixin.pop
+ values = DictMixin.values
+ items = DictMixin.items
+ iterkeys = DictMixin.iterkeys
+ itervalues = DictMixin.itervalues
+ iteritems = DictMixin.iteritems
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+
+ def copy(self):
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ if isinstance(other, OrderedDict):
+ if len(self) != len(other):
+ return False
+ for p, q in zip(self.items(), other.items()):
+ if p != q:
+ return False
+ return True
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
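+
This backport targets Python 2.4-2.6, where collections.OrderedDict does not exist (UserDict.DictMixin itself is Python 2 only). Behaviour matches the stdlib class; a quick sketch, run under Python 2:

    from ordereddict import OrderedDict  # adjust the import to where the module lives

    d = OrderedDict()
    d['b'] = 1
    d['a'] = 2
    d['c'] = 3
    print(d.keys())               # ['b', 'a', 'c']  (insertion order is kept)
    print(d.popitem())            # ('c', 3)  (LIFO by default)
    print(d.popitem(last=False))  # ('b', 1)  (FIFO when last=False)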
diff --git a/collectors/python.d.plugin/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
new file mode 100644
index 0000000..3add848
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: MIT
+"""
+urllib3 - Thread-safe connection pooling and re-using.
+"""
+
+from __future__ import absolute_import
+import warnings
+
+from .connectionpool import (
+ HTTPConnectionPool,
+ HTTPSConnectionPool,
+ connection_from_url
+)
+
+from . import exceptions
+from .filepost import encode_multipart_formdata
+from .poolmanager import PoolManager, ProxyManager, proxy_from_url
+from .response import HTTPResponse
+from .util.request import make_headers
+from .util.url import get_host
+from .util.timeout import Timeout
+from .util.retry import Retry
+
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+try: # Python 2.7+
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
+__license__ = 'MIT'
+__version__ = '1.21.1'
+
+__all__ = (
+ 'HTTPConnectionPool',
+ 'HTTPSConnectionPool',
+ 'PoolManager',
+ 'ProxyManager',
+ 'HTTPResponse',
+ 'Retry',
+ 'Timeout',
+ 'add_stderr_logger',
+ 'connection_from_url',
+ 'disable_warnings',
+ 'encode_multipart_formdata',
+ 'get_host',
+ 'make_headers',
+ 'proxy_from_url',
+)
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+
+def add_stderr_logger(level=logging.DEBUG):
+ """
+ Helper for quickly adding a StreamHandler to the logger. Useful for
+ debugging.
+
+ Returns the handler after adding it.
+ """
+ # This method needs to be in this __init__.py to get the __name__ correct
+ # even if urllib3 is vendored within another package.
+ logger = logging.getLogger(__name__)
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
+ logger.addHandler(handler)
+ logger.setLevel(level)
+ logger.debug('Added a stderr logging handler to logger: %s', __name__)
+ return handler
+
+
+# ... Clean up.
+del NullHandler
+
+
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
+# SecurityWarnings always go off by default.
+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarnings should go off once per host
+warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
+# InsecurePlatformWarnings don't vary between requests, so we keep them at default.
+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
+ append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
+
+
+def disable_warnings(category=exceptions.HTTPWarning):
+ """
+ Helper for quickly disabling all urllib3 warnings.
+ """
+ warnings.simplefilter('ignore', category)
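+
The exported names above cover the common entry points; the usual pattern is one shared PoolManager per process. A minimal sketch (the URL is a placeholder):

    import urllib3  # the vendored copy lives under python_modules/

    http = urllib3.PoolManager(timeout=urllib3.Timeout(connect=2.0, read=5.0))
    response = http.request('GET', 'http://example.com/',
                            headers=urllib3.make_headers(user_agent='demo'))
    print(response.status, len(response.data))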
diff --git a/collectors/python.d.plugin/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
new file mode 100644
index 0000000..c1d2fad
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
@@ -0,0 +1,315 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from collections import Mapping, MutableMapping
+try:
+ from threading import RLock
+except ImportError: # Platform-specific: No threads available
+ class RLock:
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+
+try: # Python 2.7+
+ from collections import OrderedDict
+except ImportError:
+ from .packages.ordered_dict import OrderedDict
+from .packages.six import iterkeys, itervalues, PY3
+
+
+__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
+
+
+_Null = object()
+
+
+class RecentlyUsedContainer(MutableMapping):
+ """
+ Provides a thread-safe dict-like container which maintains up to
+ ``maxsize`` keys while throwing away the least-recently-used keys beyond
+ ``maxsize``.
+
+ :param maxsize:
+ Maximum number of recent elements to retain.
+
+ :param dispose_func:
+ Every time an item is evicted from the container,
+ ``dispose_func(value)`` is called.
+ """
+
+ ContainerCls = OrderedDict
+
+ def __init__(self, maxsize=10, dispose_func=None):
+ self._maxsize = maxsize
+ self.dispose_func = dispose_func
+
+ self._container = self.ContainerCls()
+ self.lock = RLock()
+
+ def __getitem__(self, key):
+ # Re-insert the item, moving it to the end of the eviction line.
+ with self.lock:
+ item = self._container.pop(key)
+ self._container[key] = item
+ return item
+
+ def __setitem__(self, key, value):
+ evicted_value = _Null
+ with self.lock:
+ # Possibly evict the existing value of 'key'
+ evicted_value = self._container.get(key, _Null)
+ self._container[key] = value
+
+ # If we didn't evict an existing value, we might have to evict the
+ # least recently used item from the beginning of the container.
+ if len(self._container) > self._maxsize:
+ _key, evicted_value = self._container.popitem(last=False)
+
+ if self.dispose_func and evicted_value is not _Null:
+ self.dispose_func(evicted_value)
+
+ def __delitem__(self, key):
+ with self.lock:
+ value = self._container.pop(key)
+
+ if self.dispose_func:
+ self.dispose_func(value)
+
+ def __len__(self):
+ with self.lock:
+ return len(self._container)
+
+ def __iter__(self):
+ raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
+
+ def clear(self):
+ with self.lock:
+ # Copy pointers to all values, then wipe the mapping
+ values = list(itervalues(self._container))
+ self._container.clear()
+
+ if self.dispose_func:
+ for value in values:
+ self.dispose_func(value)
+
+ def keys(self):
+ with self.lock:
+ return list(iterkeys(self._container))
+
+
+class HTTPHeaderDict(MutableMapping):
+ """
+ :param headers:
+ An iterable of field-value pairs. Must not contain multiple field names
+ when compared case-insensitively.
+
+ :param kwargs:
+ Additional field-value pairs to pass in to ``dict.update``.
+
+ A ``dict`` like container for storing HTTP Headers.
+
+ Field names are stored and compared case-insensitively in compliance with
+ RFC 7230. Iteration provides the first case-sensitive key seen for each
+ case-insensitive pair.
+
+ Using ``__setitem__`` syntax overwrites fields that compare equal
+ case-insensitively in order to maintain ``dict``'s API. For fields that
+ compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
+ in a loop.
+
+ If multiple fields that are equal case-insensitively are passed to the
+ constructor or ``.update``, the behavior is undefined and some will be
+ lost.
+
+ >>> headers = HTTPHeaderDict()
+ >>> headers.add('Set-Cookie', 'foo=bar')
+ >>> headers.add('set-cookie', 'baz=quxx')
+ >>> headers['content-length'] = '7'
+ >>> headers['SET-cookie']
+ 'foo=bar, baz=quxx'
+ >>> headers['Content-Length']
+ '7'
+ """
+
+ def __init__(self, headers=None, **kwargs):
+ super(HTTPHeaderDict, self).__init__()
+ self._container = OrderedDict()
+ if headers is not None:
+ if isinstance(headers, HTTPHeaderDict):
+ self._copy_from(headers)
+ else:
+ self.extend(headers)
+ if kwargs:
+ self.extend(kwargs)
+
+ def __setitem__(self, key, val):
+ self._container[key.lower()] = [key, val]
+ return self._container[key.lower()]
+
+ def __getitem__(self, key):
+ val = self._container[key.lower()]
+ return ', '.join(val[1:])
+
+ def __delitem__(self, key):
+ del self._container[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self._container
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
+ return False
+ if not isinstance(other, type(self)):
+ other = type(self)(other)
+ return (dict((k.lower(), v) for k, v in self.itermerged()) ==
+ dict((k.lower(), v) for k, v in other.itermerged()))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if not PY3: # Python 2
+ iterkeys = MutableMapping.iterkeys
+ itervalues = MutableMapping.itervalues
+
+ __marker = object()
+
+ def __len__(self):
+ return len(self._container)
+
+ def __iter__(self):
+ # Only provide the originally cased names
+ for vals in self._container.values():
+ yield vals[0]
+
+ def pop(self, key, default=__marker):
+ '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ '''
+ # Using the MutableMapping function directly fails due to the private marker.
+ # Using ordinary dict.pop would expose the internal structures.
+ # So let's reinvent the wheel.
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def discard(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def add(self, key, val):
+ """Adds a (name, value) pair, doesn't overwrite the value if it already
+ exists.
+
+ >>> headers = HTTPHeaderDict(foo='bar')
+ >>> headers.add('Foo', 'baz')
+ >>> headers['foo']
+ 'bar, baz'
+ """
+ key_lower = key.lower()
+ new_vals = [key, val]
+ # Keep the common case aka no item present as fast as possible
+ vals = self._container.setdefault(key_lower, new_vals)
+ if new_vals is not vals:
+ vals.append(val)
+
+ def extend(self, *args, **kwargs):
+ """Generic import function for any type of header-like object.
+ Adapted version of MutableMapping.update in order to insert items
+ with self.add instead of self.__setitem__
+ """
+ if len(args) > 1:
+ raise TypeError("extend() takes at most 1 positional "
+ "arguments ({0} given)".format(len(args)))
+ other = args[0] if len(args) >= 1 else ()
+
+ if isinstance(other, HTTPHeaderDict):
+ for key, val in other.iteritems():
+ self.add(key, val)
+ elif isinstance(other, Mapping):
+ for key in other:
+ self.add(key, other[key])
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self.add(key, other[key])
+ else:
+ for key, value in other:
+ self.add(key, value)
+
+ for key, value in kwargs.items():
+ self.add(key, value)
+
+ def getlist(self, key):
+ """Returns a list of all the values for the named field. Returns an
+ empty list if the key doesn't exist."""
+ try:
+ vals = self._container[key.lower()]
+ except KeyError:
+ return []
+ else:
+ return vals[1:]
+
+ # Backwards compatibility for httplib
+ getheaders = getlist
+ getallmatchingheaders = getlist
+ iget = getlist
+
+ def __repr__(self):
+ return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+ def _copy_from(self, other):
+ for key in other:
+ val = other.getlist(key)
+ if isinstance(val, list):
+ # Don't need to convert tuples
+ val = list(val)
+ self._container[key.lower()] = [key] + val
+
+ def copy(self):
+ clone = type(self)()
+ clone._copy_from(self)
+ return clone
+
+ def iteritems(self):
+ """Iterate over all header lines, including duplicate ones."""
+ for key in self:
+ vals = self._container[key.lower()]
+ for val in vals[1:]:
+ yield vals[0], val
+
+ def itermerged(self):
+ """Iterate over all headers, merging duplicate ones together."""
+ for key in self:
+ val = self._container[key.lower()]
+ yield val[0], ', '.join(val[1:])
+
+ def items(self):
+ return list(self.iteritems())
+
+ @classmethod
+ def from_httplib(cls, message): # Python 2
+ """Read headers from a Python 2 httplib message object."""
+ # python2.7 does not expose a proper API for exporting multiheaders
+ # efficiently. This function re-reads raw lines from the message
+ # object and extracts the multiheaders properly.
+ headers = []
+
+ for line in message.headers:
+ if line.startswith((' ', '\t')):
+ key, value = headers[-1]
+ headers[-1] = (key, value + '\r\n' + line.rstrip())
+ continue
+
+ key, value = line.split(':', 1)
+ headers.append((key, value.strip()))
+
+ return cls(headers)
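+
These two containers back the rest of the package: RecentlyUsedContainer caches pools inside PoolManager, and HTTPHeaderDict carries response headers. The header semantics documented above, in a short sketch:

    from urllib3._collections import HTTPHeaderDict  # vendored path; adjust as needed

    headers = HTTPHeaderDict()
    headers.add('Set-Cookie', 'foo=bar')  # add() appends on a case-insensitive match
    headers.add('set-cookie', 'baz=quxx')
    headers['Content-Length'] = '7'       # __setitem__ overwrites instead

    print(headers['SET-cookie'])          # 'foo=bar, baz=quxx'
    print(headers.getlist('set-cookie'))  # ['foo=bar', 'baz=quxx']
    print(list(headers))                  # ['Set-Cookie', 'Content-Length']  (first-seen casing)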
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py
new file mode 100644
index 0000000..f757493
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/connection.py
@@ -0,0 +1,374 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import datetime
+import logging
+import os
+import sys
+import socket
+from socket import error as SocketError, timeout as SocketTimeout
+import warnings
+from .packages import six
+from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
+from .packages.six.moves.http_client import HTTPException # noqa: F401
+
+try: # Compiled with SSL?
+ import ssl
+ BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError): # Platform-specific: No SSL.
+ ssl = None
+
+ class BaseSSLError(BaseException):
+ pass
+
+
+try: # Python 3:
+ # Not a no-op, we're adding this to the namespace so it can be imported.
+ ConnectionError = ConnectionError
+except NameError: # Python 2:
+ class ConnectionError(Exception):
+ pass
+
+
+from .exceptions import (
+ NewConnectionError,
+ ConnectTimeoutError,
+ SubjectAltNameWarning,
+ SystemTimeWarning,
+)
+from .packages.ssl_match_hostname import match_hostname, CertificateError
+
+from .util.ssl_ import (
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ assert_fingerprint,
+ create_urllib3_context,
+ ssl_wrap_socket
+)
+
+
+from .util import connection
+
+from ._collections import HTTPHeaderDict
+
+log = logging.getLogger(__name__)
+
+port_by_scheme = {
+ 'http': 80,
+ 'https': 443,
+}
+
+# When updating RECENT_DATE, move it to
+# within two years of the current date, and no
+# earlier than 6 months ago.
+RECENT_DATE = datetime.date(2016, 1, 1)
+
+
+class DummyConnection(object):
+ """Used to detect a failed ConnectionCls import."""
+ pass
+
+
+class HTTPConnection(_HTTPConnection, object):
+ """
+ Based on httplib.HTTPConnection but provides an extra constructor
+ backwards-compatibility layer between older and newer Pythons.
+
+ Additional keyword parameters are used to configure attributes of the connection.
+ Accepted parameters include:
+
+ - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+ - ``source_address``: Set the source address for the current connection.
+
+ .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
+
+ - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+ defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+ Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+ For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+ you might pass::
+
+ HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ]
+
+ Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+ """
+
+ default_port = port_by_scheme['http']
+
+ #: Disable Nagle's algorithm by default.
+ #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+ default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+ #: Whether this connection verifies the host's certificate.
+ is_verified = False
+
+ def __init__(self, *args, **kw):
+ if six.PY3: # Python 3
+ kw.pop('strict', None)
+
+ # Pre-set source_address in case we have an older Python like 2.6.
+ self.source_address = kw.get('source_address')
+
+ if sys.version_info < (2, 7): # Python 2.6
+ # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
+ # not newer versions. We can still use it when creating a
+ # connection though, so we pop it *after* we have saved it as
+ # self.source_address.
+ kw.pop('source_address', None)
+
+ #: The socket options provided by the user. If no options are
+ #: provided, we use the default options.
+ self.socket_options = kw.pop('socket_options', self.default_socket_options)
+
+ # Superclass also sets self.source_address in Python 2.7+.
+ _HTTPConnection.__init__(self, *args, **kw)
+
+ def _new_conn(self):
+ """ Establish a socket connection and set nodelay settings on it.
+
+ :return: New socket connection.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = connection.create_connection(
+ (self.host, self.port), self.timeout, **extra_kw)
+
+ except SocketTimeout as e:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except SocketError as e:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+ def _prepare_conn(self, conn):
+ self.sock = conn
+ # the _tunnel_host attribute was added in python 2.6.3 (via
+ # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
+ # not have them.
+ if getattr(self, '_tunnel_host', None):
+ # TODO: Fix tunnel so it doesn't depend on self.sock state.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ def request_chunked(self, method, url, body=None, headers=None):
+ """
+ Alternative to the common request method, which sends the
+ body with chunked encoding and not as one block
+ """
+ headers = HTTPHeaderDict(headers if headers is not None else {})
+ skip_accept_encoding = 'accept-encoding' in headers
+ skip_host = 'host' in headers
+ self.putrequest(
+ method,
+ url,
+ skip_accept_encoding=skip_accept_encoding,
+ skip_host=skip_host
+ )
+ for header, value in headers.items():
+ self.putheader(header, value)
+ if 'transfer-encoding' not in headers:
+ self.putheader('Transfer-Encoding', 'chunked')
+ self.endheaders()
+
+ if body is not None:
+ stringish_types = six.string_types + (six.binary_type,)
+ if isinstance(body, stringish_types):
+ body = (body,)
+ for chunk in body:
+ if not chunk:
+ continue
+ if not isinstance(chunk, six.binary_type):
+ chunk = chunk.encode('utf8')
+ len_str = hex(len(chunk))[2:]
+ self.send(len_str.encode('utf-8'))
+ self.send(b'\r\n')
+ self.send(chunk)
+ self.send(b'\r\n')
+
+ # After the if clause, to always have a closed body
+ self.send(b'0\r\n\r\n')
+
+
+class HTTPSConnection(HTTPConnection):
+ default_port = port_by_scheme['https']
+
+ ssl_version = None
+
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ ssl_context=None, **kw):
+
+ HTTPConnection.__init__(self, host, port, strict=strict,
+ timeout=timeout, **kw)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.ssl_context = ssl_context
+
+ # Required property for Google AppEngine 1.9.0 which otherwise causes
+ # HTTPS requests to go out as HTTP. (See Issue #356)
+ self._protocol = 'https'
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(None),
+ cert_reqs=resolve_cert_reqs(None),
+ )
+
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ssl_context=self.ssl_context,
+ )
+
+
+class VerifiedHTTPSConnection(HTTPSConnection):
+ """
+ Based on httplib.HTTPSConnection but wraps the socket with
+ SSL certification.
+ """
+ cert_reqs = None
+ ca_certs = None
+ ca_cert_dir = None
+ ssl_version = None
+ assert_fingerprint = None
+
+ def set_cert(self, key_file=None, cert_file=None,
+ cert_reqs=None, ca_certs=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None):
+ """
+ This method should only be called once, before the connection is used.
+ """
+ # If cert_reqs is not provided, we can try to guess. If the user gave
+ # us a cert database, we assume they want to use it: otherwise, if
+ # they gave us an SSL Context object we should use whatever is set for
+ # it.
+ if cert_reqs is None:
+ if ca_certs or ca_cert_dir:
+ cert_reqs = 'CERT_REQUIRED'
+ elif self.ssl_context is not None:
+ cert_reqs = self.ssl_context.verify_mode
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+ self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+
+ def connect(self):
+ # Add certificate verification
+ conn = self._new_conn()
+
+ hostname = self.host
+ if getattr(self, '_tunnel_host', None):
+ # _tunnel_host was added in Python 2.6.3
+ # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
+
+ self.sock = conn
+ # Calls self._set_hostport(), so self.host is
+ # self._tunnel_host below.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ # Override the host with the one we're requesting data from.
+ hostname = self._tunnel_host
+
+ is_time_off = datetime.date.today() < RECENT_DATE
+ if is_time_off:
+ warnings.warn((
+ 'System time is way off (before {0}). This will probably '
+ 'lead to SSL verification errors').format(RECENT_DATE),
+ SystemTimeWarning
+ )
+
+ # Wrap socket using verification with the root certs in
+ # trusted_root_certs
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(self.ssl_version),
+ cert_reqs=resolve_cert_reqs(self.cert_reqs),
+ )
+
+ context = self.ssl_context
+ context.verify_mode = resolve_cert_reqs(self.cert_reqs)
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ server_hostname=hostname,
+ ssl_context=context)
+
+ if self.assert_fingerprint:
+ assert_fingerprint(self.sock.getpeercert(binary_form=True),
+ self.assert_fingerprint)
+ elif context.verify_mode != ssl.CERT_NONE \
+ and not getattr(context, 'check_hostname', False) \
+ and self.assert_hostname is not False:
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS library still thinks it is matching hostnames.
+ cert = self.sock.getpeercert()
+ if not cert.get('subjectAltName', ()):
+ warnings.warn((
+ 'Certificate for {0} has no `subjectAltName`, falling back to check for a '
+ '`commonName` for now. This feature is being removed by major browsers and '
+ 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
+ 'for details.)'.format(hostname)),
+ SubjectAltNameWarning
+ )
+ _match_hostname(cert, self.assert_hostname or hostname)
+
+ self.is_verified = (
+ context.verify_mode == ssl.CERT_REQUIRED or
+ self.assert_fingerprint is not None
+ )
+
+
+def _match_hostname(cert, asserted_hostname):
+ try:
+ match_hostname(cert, asserted_hostname)
+ except CertificateError as e:
+ log.error(
+ 'Certificate did not match expected hostname: %s. '
+ 'Certificate: %s', asserted_hostname, cert
+ )
+ # Add cert to exception and reraise so client code can inspect
+ # the cert when catching the exception, if they want to
+ e._peer_cert = cert
+ raise
+
+
+if ssl:
+ # Make a copy for testing.
+ UnverifiedHTTPSConnection = HTTPSConnection
+ HTTPSConnection = VerifiedHTTPSConnection
+else:
+ HTTPSConnection = DummyConnection
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
new file mode 100644
index 0000000..90e4c86
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
@@ -0,0 +1,900 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import errno
+import logging
+import sys
+import warnings
+
+from socket import error as SocketError, timeout as SocketTimeout
+import socket
+
+
+from .exceptions import (
+ ClosedPoolError,
+ ProtocolError,
+ EmptyPoolError,
+ HeaderParsingError,
+ HostChangedError,
+ LocationValueError,
+ MaxRetryError,
+ ProxyError,
+ ReadTimeoutError,
+ SSLError,
+ TimeoutError,
+ InsecureRequestWarning,
+ NewConnectionError,
+)
+from .packages.ssl_match_hostname import CertificateError
+from .packages import six
+from .packages.six.moves import queue
+from .connection import (
+ port_by_scheme,
+ DummyConnection,
+ HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
+ HTTPException, BaseSSLError,
+)
+from .request import RequestMethods
+from .response import HTTPResponse
+
+from .util.connection import is_connection_dropped
+from .util.request import set_file_position
+from .util.response import assert_header_parsing
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host, Url
+
+
+if six.PY2:
+ # Queue is imported for side effects on MS Windows
+ import Queue as _unused_module_Queue # noqa: F401
+
+xrange = six.moves.xrange
+
+log = logging.getLogger(__name__)
+
+_Default = object()
+
+
+# Pool objects
+class ConnectionPool(object):
+ """
+ Base class for all connection pools, such as
+ :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+ """
+
+ scheme = None
+ QueueCls = queue.LifoQueue
+
+ def __init__(self, host, port=None):
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ self.host = _ipv6_host(host).lower()
+ self.port = port
+
+ def __str__(self):
+ return '%s(host=%r, port=%r)' % (type(self).__name__,
+ self.host, self.port)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ pass
+
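+# A minimal usage sketch (not part of upstream urllib3): every pool supports
+# the context-manager protocol defined above, so close() runs on exit.
+#
+#     with HTTPConnectionPool('localhost', port=80) as pool:
+#         r = pool.request('GET', '/')
+#     # The pool is now closed and will refuse to hand out connections.
+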
+
+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
+
+
+class HTTPConnectionPool(ConnectionPool, RequestMethods):
+ """
+ Thread-safe connection pool for one host.
+
+ :param host:
+ Host used for this HTTP Connection (e.g. "localhost"), passed into
+ :class:`httplib.HTTPConnection`.
+
+ :param port:
+ Port used for this HTTP Connection (None is equivalent to 80), passed
+ into :class:`httplib.HTTPConnection`.
+
+ :param strict:
+ Causes BadStatusLine to be raised if the status line can't be parsed
+ as a valid HTTP/1.0 or 1.1 status line, passed into
+ :class:`httplib.HTTPConnection`.
+
+ .. note::
+ Only works in Python 2. This parameter is ignored in Python 3.
+
+ :param timeout:
+ Socket timeout in seconds for each individual connection. This can
+ be a float or integer, which sets the timeout for the HTTP request,
+ or an instance of :class:`urllib3.util.Timeout` which gives you more
+ fine-grained control over request timeouts. Once the constructor has
+ run, this is always a `urllib3.util.Timeout` object.
+
+ :param maxsize:
+ Number of connections to save that can be reused. More than 1 is useful
+ in multithreaded situations. If ``block`` is set to False, more
+ connections will be created but they will not be saved once they've
+ been used.
+
+ :param block:
+ If set to True, no more than ``maxsize`` connections will be used at
+ a time. When no free connections are available, the call will block
+ until a connection has been released. This is a useful side effect for
+ particular multithreaded situations where one does not want to use more
+ than maxsize connections per host to prevent flooding.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param retries:
+ Retry configuration to use by default with requests in this pool.
+
+ :param _proxy:
+ Parsed proxy URL. Should not be used directly; instead, see
+ :class:`urllib3.poolmanager.ProxyManager`.
+
+ :param _proxy_headers:
+ A dictionary with proxy headers. Should not be used directly;
+ instead, see :class:`urllib3.poolmanager.ProxyManager`.
+
+ :param \\**conn_kw:
+ Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+ :class:`urllib3.connection.HTTPSConnection` instances.
+ """
+
+ scheme = 'http'
+ ConnectionCls = HTTPConnection
+ ResponseCls = HTTPResponse
+
+ def __init__(self, host, port=None, strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
+ headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ **conn_kw):
+ ConnectionPool.__init__(self, host, port)
+ RequestMethods.__init__(self, headers)
+
+ self.strict = strict
+
+ if not isinstance(timeout, Timeout):
+ timeout = Timeout.from_float(timeout)
+
+ if retries is None:
+ retries = Retry.DEFAULT
+
+ self.timeout = timeout
+ self.retries = retries
+
+ self.pool = self.QueueCls(maxsize)
+ self.block = block
+
+ self.proxy = _proxy
+ self.proxy_headers = _proxy_headers or {}
+
+ # Fill the queue up so that doing get() on it will block properly
+ for _ in xrange(maxsize):
+ self.pool.put(None)
+
+ # These are mostly for testing and debugging purposes.
+ self.num_connections = 0
+ self.num_requests = 0
+ self.conn_kw = conn_kw
+
+ if self.proxy:
+ # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+ # We cannot know if the user has added default socket options, so we cannot replace the
+ # list.
+ self.conn_kw.setdefault('socket_options', [])
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`HTTPConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTP connection (%d): %s",
+ self.num_connections, self.host)
+
+ conn = self.ConnectionCls(host=self.host, port=self.port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+ return conn
+
+ def _get_conn(self, timeout=None):
+ """
+ Get a connection. Will return a pooled connection if one is available.
+
+ If no connections are available and :prop:`.block` is ``False``, then a
+ fresh connection is returned.
+
+ :param timeout:
+ Seconds to wait before giving up and raising
+ :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+ :prop:`.block` is ``True``.
+ """
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise EmptyPoolError(self,
+ "Pool reached maximum size and no more "
+ "connections are allowed.")
+ pass # Oh well, we'll create a new connection then
+
+ # If this is a persistent connection, check if it got disconnected
+ if conn and is_connection_dropped(conn):
+ log.debug("Resetting dropped connection: %s", self.host)
+ conn.close()
+ if getattr(conn, 'auto_open', 1) == 0:
+ # This is a proxied connection that has been mutated by
+ # httplib._tunnel() and cannot be reused (since it would
+ # attempt to bypass the proxy)
+ conn = None
+
+ return conn or self._new_conn()
+
+ def _put_conn(self, conn):
+ """
+ Put a connection back into the pool.
+
+ :param conn:
+ Connection object for the current host and port as returned by
+ :meth:`._new_conn` or :meth:`._get_conn`.
+
+ If the pool is already full, the connection is closed and discarded
+ because we exceeded maxsize. If connections are discarded frequently,
+ then maxsize should be increased.
+
+ If the pool is closed, then the connection will be closed and discarded.
+ """
+ try:
+ self.pool.put(conn, block=False)
+ return # Everything is dandy, done.
+ except AttributeError:
+ # self.pool is None.
+ pass
+ except queue.Full:
+ # This should never happen if self.block == True
+ log.warning(
+ "Connection pool is full, discarding connection: %s",
+ self.host)
+
+ # Connection never got put back into the pool, close it.
+ if conn:
+ conn.close()
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ pass
+
+ def _prepare_proxy(self, conn):
+ # Nothing to do for HTTP connections.
+ pass
+
+ def _get_timeout(self, timeout):
+ """ Helper that always returns a :class:`urllib3.util.Timeout` """
+ if timeout is _Default:
+ return self.timeout.clone()
+
+ if isinstance(timeout, Timeout):
+ return timeout.clone()
+ else:
+ # User passed us an int/float. This is for backwards compatibility,
+ # can be removed later
+ return Timeout.from_float(timeout)
+
+ def _raise_timeout(self, err, url, timeout_value):
+ """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+ if isinstance(err, SocketTimeout):
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ # See the comment about EAGAIN in Python 3 in _make_request() below.
+ # In Python 2 we have to specifically catch it and raise the timeout error
+ if hasattr(err, 'errno') and err.errno in _blocking_errnos:
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ # Catch possible read timeouts thrown as SSL errors. If not the
+ # case, rethrow the original. We need to do this because of:
+ # http://bugs.python.org/issue10272
+ if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
+ **httplib_request_kw):
+ """
+ Perform a request on a given urllib connection object taken from our
+ pool.
+
+ :param conn:
+ a connection from one of our connection pools
+
+ :param timeout:
+ Socket timeout in seconds for the request. This can be a
+ float or integer, which will set the same timeout value for
+ the socket connect and the socket read, or an instance of
+ :class:`urllib3.util.Timeout`, which gives you more fine-grained
+ control over your timeouts.
+ """
+ self.num_requests += 1
+
+ timeout_obj = self._get_timeout(timeout)
+ timeout_obj.start_connect()
+ conn.timeout = timeout_obj.connect_timeout
+
+ # Trigger any extra validation we need to do.
+ try:
+ self._validate_conn(conn)
+ except (SocketTimeout, BaseSSLError) as e:
+ # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+ self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+ raise
+
+ # conn.request() calls httplib.*.request, not the method in
+ # urllib3.request. It also calls makefile (recv) on the socket.
+ if chunked:
+ conn.request_chunked(method, url, **httplib_request_kw)
+ else:
+ conn.request(method, url, **httplib_request_kw)
+
+ # Reset the timeout for the recv() on the socket
+ read_timeout = timeout_obj.read_timeout
+
+ # App Engine doesn't have a sock attr
+ if getattr(conn, 'sock', None):
+ # In Python 3 socket.py will catch EAGAIN and return None when you
+ # try and read into the file pointer created by http.client, which
+ # instead raises a BadStatusLine exception. Instead of catching
+ # the exception and assuming all BadStatusLine exceptions are read
+ # timeouts, check for a zero timeout before making the request.
+ if read_timeout == 0:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % read_timeout)
+ if read_timeout is Timeout.DEFAULT_TIMEOUT:
+ conn.sock.settimeout(socket.getdefaulttimeout())
+ else: # None or a value
+ conn.sock.settimeout(read_timeout)
+
+ # Receive the response from the server
+ try:
+ try: # Python 2.7, use buffering of HTTP responses
+ httplib_response = conn.getresponse(buffering=True)
+ except TypeError: # Python 2.6 and older, Python 3
+ try:
+ httplib_response = conn.getresponse()
+ except Exception as e:
+ # Remove the TypeError from the exception chain in Python 3;
+ # otherwise it looks like a programming error was the cause.
+ six.raise_from(e, None)
+ except (SocketTimeout, BaseSSLError, SocketError) as e:
+ self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+ raise
+
+ # AppEngine doesn't have a version attr.
+ http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
+ log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
+ method, url, http_version, httplib_response.status,
+ httplib_response.length)
+
+ try:
+ assert_header_parsing(httplib_response.msg)
+ except HeaderParsingError as hpe: # Platform-specific: Python 3
+ log.warning(
+ 'Failed to parse headers (url=%s): %s',
+ self._absolute_url(url), hpe, exc_info=True)
+
+ return httplib_response
+
+ def _absolute_url(self, path):
+ return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ # Disable access to the pool
+ old_pool, self.pool = self.pool, None
+
+ try:
+ while True:
+ conn = old_pool.get(block=False)
+ if conn:
+ conn.close()
+
+ except queue.Empty:
+ pass # Done.
+
+ def is_same_host(self, url):
+ """
+ Check if the given ``url`` is a member of the same host as this
+ connection pool.
+ """
+ if url.startswith('/'):
+ return True
+
+ # TODO: Add optional support for socket.gethostbyname checking.
+ scheme, host, port = get_host(url)
+
+ host = _ipv6_host(host).lower()
+
+ # Use explicit default port for comparison when none is given
+ if self.port and not port:
+ port = port_by_scheme.get(scheme)
+ elif not self.port and port == port_by_scheme.get(scheme):
+ port = None
+
+ return (scheme, host, port) == (self.scheme, self.host, self.port)
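+
+ # Illustrative examples (not part of upstream urllib3), assuming the pool
+ # was created as HTTPConnectionPool('example.com'):
+ #
+ #     pool.is_same_host('/relative/path')          -> True
+ #     pool.is_same_host('http://example.com:80/')  -> True (default port)
+ #     pool.is_same_host('https://example.com/')    -> False (scheme differs)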
+
+ def urlopen(self, method, url, body=None, headers=None, retries=None,
+ redirect=True, assert_same_host=True, timeout=_Default,
+ pool_timeout=None, release_conn=None, chunked=False,
+ body_pos=None, **response_kw):
+ """
+ Get a connection from the pool and perform an HTTP request. This is the
+ lowest level call for making a request, so you'll need to specify all
+ the raw details.
+
+ .. note::
+
+ More commonly, it's appropriate to use a convenience method provided
+ by :class:`.RequestMethods`, such as :meth:`request`.
+
+ .. note::
+
+ `release_conn` will only behave as expected if
+ `preload_content=False` because we want to make
+ `preload_content=False` the default behaviour someday soon without
+ breaking backwards compatibility.
+
+ :param method:
+ HTTP request method (such as GET, POST, PUT, etc.)
+
+ :param body:
+ Data to send in the request body (useful for creating
+ POST requests, see HTTPConnectionPool.post_url for
+ more convenience).
+
+ :param headers:
+ Dictionary of custom headers to send, such as User-Agent,
+ If-None-Match, etc. If None, pool headers are used. If provided,
+ these headers completely replace any pool-specific headers.
+
+ :param retries:
+ Configure the number of retries to allow before raising a
+ :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+ Pass ``None`` to retry until you receive a response. Pass a
+ :class:`~urllib3.util.retry.Retry` object for fine-grained control
+ over different types of retries.
+ Pass an integer number to retry connection errors that many times,
+ but no other types of errors. Pass zero to never retry.
+
+ If ``False``, then retries are disabled and any exception is raised
+ immediately. Also, instead of raising a MaxRetryError on redirects,
+ the redirect response will be returned.
+
+ :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+ :param redirect:
+ If True, automatically handle redirects (status codes 301, 302,
+ 303, 307, 308). Each redirect counts as a retry. Disabling retries
+ will disable redirect, too.
+
+ :param assert_same_host:
+ If ``True``, will make sure that the host of the pool's requests is
+ consistent, and raise HostChangedError otherwise. When ``False``, you
+ can use the pool on an HTTP proxy and request foreign hosts.
+
+ :param timeout:
+ If specified, overrides the default timeout for this one
+ request. It may be a float (in seconds) or an instance of
+ :class:`urllib3.util.Timeout`.
+
+ :param pool_timeout:
+ If set and the pool is set to block=True, then this method will
+ block for ``pool_timeout`` seconds and raise EmptyPoolError if no
+ connection is available within the time period.
+
+ :param release_conn:
+ If False, then the urlopen call will not release the connection
+ back into the pool once a response is received (but will release if
+ you read the entire contents of the response such as when
+ `preload_content=True`). This is useful if you're not preloading
+ the response's content immediately. You will need to call
+ ``r.release_conn()`` on the response ``r`` to return the connection
+ back into the pool. If None, it takes the value of
+ ``response_kw.get('preload_content', True)``.
+
+ :param chunked:
+ If True, urllib3 will send the body using chunked transfer
+ encoding. Otherwise, urllib3 will send the body using the standard
+ content-length form. Defaults to False.
+
+ :param int body_pos:
+ Position to seek to in file-like body in the event of a retry or
+ redirect. Typically this won't need to be set because urllib3 will
+ auto-populate the value when needed.
+
+ :param \\**response_kw:
+ Additional parameters are passed to
+ :meth:`urllib3.response.HTTPResponse.from_httplib`
+ """
+ if headers is None:
+ headers = self.headers
+
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if release_conn is None:
+ release_conn = response_kw.get('preload_content', True)
+
+ # Check host
+ if assert_same_host and not self.is_same_host(url):
+ raise HostChangedError(self, url, retries)
+
+ conn = None
+
+ # Track whether `conn` needs to be released before
+ # returning/raising/recursing. Update this variable if necessary, and
+ # leave `release_conn` constant throughout the function. That way, if
+ # the function recurses, the original value of `release_conn` will be
+ # passed down into the recursive call, and its value will be respected.
+ #
+ # See issue #651 [1] for details.
+ #
+ # [1] <https://github.com/shazow/urllib3/issues/651>
+ release_this_conn = release_conn
+
+ # Merge the proxy headers. Only do this in HTTP. We have to copy the
+ # headers dict so we can safely change it without those changes being
+ # reflected in anyone else's copy.
+ if self.scheme == 'http':
+ headers = headers.copy()
+ headers.update(self.proxy_headers)
+
+ # Must keep the exception bound to a separate variable or else Python 3
+ # complains about UnboundLocalError.
+ err = None
+
+ # Keep track of whether we cleanly exited the except block. This
+ # ensures we do proper cleanup in finally.
+ clean_exit = False
+
+ # Rewind body position, if needed. Record current position
+ # for future rewinds in the event of a redirect/retry.
+ body_pos = set_file_position(body, body_pos)
+
+ try:
+ # Request a connection from the queue.
+ timeout_obj = self._get_timeout(timeout)
+ conn = self._get_conn(timeout=pool_timeout)
+
+ conn.timeout = timeout_obj.connect_timeout
+
+ is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
+ if is_new_proxy_conn:
+ self._prepare_proxy(conn)
+
+ # Make the request on the httplib connection object.
+ httplib_response = self._make_request(conn, method, url,
+ timeout=timeout_obj,
+ body=body, headers=headers,
+ chunked=chunked)
+
+ # If we're going to release the connection in ``finally:``, then
+ # the response doesn't need to know about the connection. Otherwise
+ # it will also try to release it and we'll have a double-release
+ # mess.
+ response_conn = conn if not release_conn else None
+
+ # Pass method to Response for length checking
+ response_kw['request_method'] = method
+
+ # Import httplib's response into our own wrapper object
+ response = self.ResponseCls.from_httplib(httplib_response,
+ pool=self,
+ connection=response_conn,
+ retries=retries,
+ **response_kw)
+
+ # Everything went great!
+ clean_exit = True
+
+ except queue.Empty:
+ # Timed out by queue.
+ raise EmptyPoolError(self, "No pool connections are available.")
+
+ except (BaseSSLError, CertificateError) as e:
+ # Close the connection. If a connection is reused on which there
+ # was a Certificate error, the next request will certainly raise
+ # another Certificate error.
+ clean_exit = False
+ raise SSLError(e)
+
+ except SSLError:
+ # Treat SSLError separately from BaseSSLError to preserve
+ # traceback.
+ clean_exit = False
+ raise
+
+ except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
+ # Discard the connection for these exceptions. It will be
+ # replaced during the next _get_conn() call.
+ clean_exit = False
+
+ if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+ e = ProxyError('Cannot connect to proxy.', e)
+ elif isinstance(e, (SocketError, HTTPException)):
+ e = ProtocolError('Connection aborted.', e)
+
+ retries = retries.increment(method, url, error=e, _pool=self,
+ _stacktrace=sys.exc_info()[2])
+ retries.sleep()
+
+ # Keep track of the error for the retry warning.
+ err = e
+
+ finally:
+ if not clean_exit:
+ # We hit some kind of exception, handled or otherwise. We need
+ # to throw the connection away unless explicitly told not to.
+ # Close the connection, set the variable to None, and make sure
+ # we put the None back in the pool to avoid leaking it.
+ conn = conn and conn.close()
+ release_this_conn = True
+
+ if release_this_conn:
+ # Put the connection back to be reused. If the connection is
+ # expired then it will be None, which will get replaced with a
+ # fresh connection during _get_conn.
+ self._put_conn(conn)
+
+ if not conn:
+ # Try again
+ log.warning("Retrying (%r) after connection "
+ "broken by '%r': %s", retries, err, url)
+ return self.urlopen(method, url, body, headers, retries,
+ redirect, assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and response.get_redirect_location()
+ if redirect_location:
+ if response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+
+ retries.sleep_for_retry(response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(
+ method, redirect_location, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(response.getheader('Retry-After'))
+ if retries.is_retry(method, response.status, has_retry_after):
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_status:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+ retries.sleep(response)
+ log.debug("Retry: %s", url)
+ return self.urlopen(
+ method, url, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ body_pos=body_pos, **response_kw)
+
+ return response
+
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+ """
+ Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+ When Python is compiled with the :mod:`ssl` module, then
+ :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
+ instead of :class:`.HTTPSConnection`.
+
+ :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
+ ``assert_hostname`` and ``host`` in this order to verify connections.
+ If ``assert_hostname`` is False, no verification is done.
+
+ The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+ ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
+ available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
+ the connection socket into an SSL socket.
+ """
+
+ scheme = 'https'
+ ConnectionCls = HTTPSConnection
+
+ def __init__(self, host, port=None,
+ strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
+ block=False, headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ key_file=None, cert_file=None, cert_reqs=None,
+ ca_certs=None, ssl_version=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None, **conn_kw):
+
+ HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
+ block, headers, retries, _proxy, _proxy_headers,
+ **conn_kw)
+
+ if ca_certs and cert_reqs is None:
+ cert_reqs = 'CERT_REQUIRED'
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ def _prepare_conn(self, conn):
+ """
+ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+ and establish the tunnel if proxy is used.
+ """
+
+ if isinstance(conn, VerifiedHTTPSConnection):
+ conn.set_cert(key_file=self.key_file,
+ cert_file=self.cert_file,
+ cert_reqs=self.cert_reqs,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint)
+ conn.ssl_version = self.ssl_version
+ return conn
+
+ def _prepare_proxy(self, conn):
+ """
+ Establish the tunnel connection early, because otherwise httplib
+ would improperly set the Host: header to the proxy's IP:port.
+ """
+ # Python 2.7+
+ try:
+ set_tunnel = conn.set_tunnel
+ except AttributeError: # Platform-specific: Python 2.6
+ set_tunnel = conn._set_tunnel
+
+ if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
+ set_tunnel(self.host, self.port)
+ else:
+ set_tunnel(self.host, self.port, self.proxy_headers)
+
+ conn.connect()
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`httplib.HTTPSConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTPS connection (%d): %s",
+ self.num_connections, self.host)
+
+ if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+ raise SSLError("Can't connect to HTTPS URL because the SSL "
+ "module is not available.")
+
+ actual_host = self.host
+ actual_port = self.port
+ if self.proxy is not None:
+ actual_host = self.proxy.host
+ actual_port = self.proxy.port
+
+ conn = self.ConnectionCls(host=actual_host, port=actual_port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+
+ return self._prepare_conn(conn)
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+ # Force connect early to allow us to validate the connection.
+ if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
+ conn.connect()
+
+ if not conn.is_verified:
+ warnings.warn((
+ 'Unverified HTTPS request is being made. '
+ 'Adding certificate verification is strongly advised. See: '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings'),
+ InsecureRequestWarning)
+
+
+def connection_from_url(url, **kw):
+ """
+ Given a url, return an :class:`.ConnectionPool` instance of its host.
+
+ This is a shortcut for not having to parse out the scheme, host, and port
+ of the url before creating an :class:`.ConnectionPool` instance.
+
+ :param url:
+ Absolute URL string that must include the scheme. Port is optional.
+
+ :param \\**kw:
+ Passes additional parameters to the constructor of the appropriate
+ :class:`.ConnectionPool`. Useful for specifying things like
+ timeout, maxsize, headers, etc.
+
+ Example::
+
+ >>> conn = connection_from_url('http://google.com/')
+ >>> r = conn.request('GET', '/')
+ """
+ scheme, host, port = get_host(url)
+ port = port or port_by_scheme.get(scheme, 80)
+ if scheme == 'https':
+ return HTTPSConnectionPool(host, port=port, **kw)
+ else:
+ return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _ipv6_host(host):
+ """
+ Process IPv6 address literals
+ """
+
+ # httplib doesn't like it when we include brackets in IPv6 addresses
+ # Specifically, if we include brackets but also pass the port then
+ # httplib crazily doubles up the square brackets on the Host header.
+ # Instead, we need to make sure we never pass ``None`` as the port.
+ # However, for backward compatibility reasons we can't actually
+ # *assert* that. See http://bugs.python.org/issue28539
+ #
+ # Also, if an IPv6 address literal has a zone identifier, the
+ # percent sign might be URI-encoded; convert it back into ASCII.
+ if host.startswith('[') and host.endswith(']'):
+ host = host.replace('%25', '%').strip('[]')
+ return host
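+
+
+# Illustrative behaviour of _ipv6_host (not part of upstream urllib3):
+#
+#     _ipv6_host('[::1]')            -> '::1'
+#     _ipv6_host('[fe80::1%25eth0]') -> 'fe80::1%eth0'  (zone id decoded)
+#     _ipv6_host('example.com')      -> 'example.com'   (unchanged)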
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 0000000..bb82667
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,591 @@
+# SPDX-License-Identifier: MIT
+"""
+This module uses ctypes to bind a whole bunch of functions and constants from
+SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants, and
+they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+ Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import platform
+from ctypes.util import find_library
+from ctypes import (
+ c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
+ c_bool
+)
+from ctypes import CDLL, POINTER, CFUNCTYPE
+
+
+security_path = find_library('Security')
+if not security_path:
+ raise ImportError('The library Security could not be found')
+
+
+core_foundation_path = find_library('CoreFoundation')
+if not core_foundation_path:
+ raise ImportError('The library CoreFoundation could not be found')
+
+
+version = platform.mac_ver()[0]
+version_info = tuple(map(int, version.split('.')))
+if version_info < (10, 8):
+ raise OSError(
+ 'Only OS X 10.8 and newer are supported, not %s.%s' % (
+ version_info[0], version_info[1]
+ )
+ )
+
+Security = CDLL(security_path, use_errno=True)
+CoreFoundation = CDLL(core_foundation_path, use_errno=True)
+
+Boolean = c_bool
+CFIndex = c_long
+CFStringEncoding = c_uint32
+CFData = c_void_p
+CFString = c_void_p
+CFArray = c_void_p
+CFMutableArray = c_void_p
+CFDictionary = c_void_p
+CFError = c_void_p
+CFType = c_void_p
+CFTypeID = c_ulong
+
+CFTypeRef = POINTER(CFType)
+CFAllocatorRef = c_void_p
+
+OSStatus = c_int32
+
+CFDataRef = POINTER(CFData)
+CFStringRef = POINTER(CFString)
+CFArrayRef = POINTER(CFArray)
+CFMutableArrayRef = POINTER(CFMutableArray)
+CFDictionaryRef = POINTER(CFDictionary)
+CFArrayCallBacks = c_void_p
+CFDictionaryKeyCallBacks = c_void_p
+CFDictionaryValueCallBacks = c_void_p
+
+SecCertificateRef = POINTER(c_void_p)
+SecExternalFormat = c_uint32
+SecExternalItemType = c_uint32
+SecIdentityRef = POINTER(c_void_p)
+SecItemImportExportFlags = c_uint32
+SecItemImportExportKeyParameters = c_void_p
+SecKeychainRef = POINTER(c_void_p)
+SSLProtocol = c_uint32
+SSLCipherSuite = c_uint32
+SSLContextRef = POINTER(c_void_p)
+SecTrustRef = POINTER(c_void_p)
+SSLConnectionRef = c_uint32
+SecTrustResultType = c_uint32
+SecTrustOptionFlags = c_uint32
+SSLProtocolSide = c_uint32
+SSLConnectionType = c_uint32
+SSLSessionOption = c_uint32
+
+
+try:
+ Security.SecItemImport.argtypes = [
+ CFDataRef,
+ CFStringRef,
+ POINTER(SecExternalFormat),
+ POINTER(SecExternalItemType),
+ SecItemImportExportFlags,
+ POINTER(SecItemImportExportKeyParameters),
+ SecKeychainRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecItemImport.restype = OSStatus
+
+ Security.SecCertificateGetTypeID.argtypes = []
+ Security.SecCertificateGetTypeID.restype = CFTypeID
+
+ Security.SecIdentityGetTypeID.argtypes = []
+ Security.SecIdentityGetTypeID.restype = CFTypeID
+
+ Security.SecKeyGetTypeID.argtypes = []
+ Security.SecKeyGetTypeID.restype = CFTypeID
+
+ Security.SecCertificateCreateWithData.argtypes = [
+ CFAllocatorRef,
+ CFDataRef
+ ]
+ Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+ Security.SecCertificateCopyData.argtypes = [
+ SecCertificateRef
+ ]
+ Security.SecCertificateCopyData.restype = CFDataRef
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SecIdentityCreateWithCertificate.argtypes = [
+ CFTypeRef,
+ SecCertificateRef,
+ POINTER(SecIdentityRef)
+ ]
+ Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+ Security.SecKeychainCreate.argtypes = [
+ c_char_p,
+ c_uint32,
+ c_void_p,
+ Boolean,
+ c_void_p,
+ POINTER(SecKeychainRef)
+ ]
+ Security.SecKeychainCreate.restype = OSStatus
+
+ Security.SecKeychainDelete.argtypes = [
+ SecKeychainRef
+ ]
+ Security.SecKeychainDelete.restype = OSStatus
+
+ Security.SecPKCS12Import.argtypes = [
+ CFDataRef,
+ CFDictionaryRef,
+ POINTER(CFArrayRef)
+ ]
+ Security.SecPKCS12Import.restype = OSStatus
+
+ SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
+ SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
+
+ Security.SSLSetIOFuncs.argtypes = [
+ SSLContextRef,
+ SSLReadFunc,
+ SSLWriteFunc
+ ]
+ Security.SSLSetIOFuncs.restype = OSStatus
+
+ Security.SSLSetPeerID.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerID.restype = OSStatus
+
+ Security.SSLSetCertificate.argtypes = [
+ SSLContextRef,
+ CFArrayRef
+ ]
+ Security.SSLSetCertificate.restype = OSStatus
+
+ Security.SSLSetCertificateAuthorities.argtypes = [
+ SSLContextRef,
+ CFTypeRef,
+ Boolean
+ ]
+ Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+ Security.SSLSetConnection.argtypes = [
+ SSLContextRef,
+ SSLConnectionRef
+ ]
+ Security.SSLSetConnection.restype = OSStatus
+
+ Security.SSLSetPeerDomainName.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerDomainName.restype = OSStatus
+
+ Security.SSLHandshake.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLHandshake.restype = OSStatus
+
+ Security.SSLRead.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLRead.restype = OSStatus
+
+ Security.SSLWrite.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLWrite.restype = OSStatus
+
+ Security.SSLClose.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLClose.restype = OSStatus
+
+ Security.SSLGetNumberSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+ Security.SSLGetSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetSupportedCiphers.restype = OSStatus
+
+ Security.SSLSetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ c_size_t
+ ]
+ Security.SSLSetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNumberEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNegotiatedCipher.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite)
+ ]
+ Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+ Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+ SSLContextRef,
+ POINTER(SSLProtocol)
+ ]
+ Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+ Security.SSLCopyPeerTrust.argtypes = [
+ SSLContextRef,
+ POINTER(SecTrustRef)
+ ]
+ Security.SSLCopyPeerTrust.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificates.argtypes = [
+ SecTrustRef,
+ CFArrayRef
+ ]
+ Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
+ SecTrustRef,
+ Boolean
+ ]
+ Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+ Security.SecTrustEvaluate.argtypes = [
+ SecTrustRef,
+ POINTER(SecTrustResultType)
+ ]
+ Security.SecTrustEvaluate.restype = OSStatus
+
+ Security.SecTrustGetCertificateCount.argtypes = [
+ SecTrustRef
+ ]
+ Security.SecTrustGetCertificateCount.restype = CFIndex
+
+ Security.SecTrustGetCertificateAtIndex.argtypes = [
+ SecTrustRef,
+ CFIndex
+ ]
+ Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+ Security.SSLCreateContext.argtypes = [
+ CFAllocatorRef,
+ SSLProtocolSide,
+ SSLConnectionType
+ ]
+ Security.SSLCreateContext.restype = SSLContextRef
+
+ Security.SSLSetSessionOption.argtypes = [
+ SSLContextRef,
+ SSLSessionOption,
+ Boolean
+ ]
+ Security.SSLSetSessionOption.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMin.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMax.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SSLReadFunc = SSLReadFunc
+ Security.SSLWriteFunc = SSLWriteFunc
+ Security.SSLContextRef = SSLContextRef
+ Security.SSLProtocol = SSLProtocol
+ Security.SSLCipherSuite = SSLCipherSuite
+ Security.SecIdentityRef = SecIdentityRef
+ Security.SecKeychainRef = SecKeychainRef
+ Security.SecTrustRef = SecTrustRef
+ Security.SecTrustResultType = SecTrustResultType
+ Security.SecExternalFormat = SecExternalFormat
+ Security.OSStatus = OSStatus
+
+ Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+ Security, 'kSecImportExportPassphrase'
+ )
+ Security.kSecImportItemIdentity = CFStringRef.in_dll(
+ Security, 'kSecImportItemIdentity'
+ )
+
+ # CoreFoundation time!
+ CoreFoundation.CFRetain.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRetain.restype = CFTypeRef
+
+ CoreFoundation.CFRelease.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRelease.restype = None
+
+ CoreFoundation.CFGetTypeID.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFGetTypeID.restype = CFTypeID
+
+ CoreFoundation.CFStringCreateWithCString.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
+
+ CoreFoundation.CFStringGetCStringPtr.argtypes = [
+ CFStringRef,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
+
+ CoreFoundation.CFStringGetCString.argtypes = [
+ CFStringRef,
+ c_char_p,
+ CFIndex,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCString.restype = c_bool
+
+ CoreFoundation.CFDataCreate.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFIndex
+ ]
+ CoreFoundation.CFDataCreate.restype = CFDataRef
+
+ CoreFoundation.CFDataGetLength.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetLength.restype = CFIndex
+
+ CoreFoundation.CFDataGetBytePtr.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetBytePtr.restype = c_void_p
+
+ CoreFoundation.CFDictionaryCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFDictionaryKeyCallBacks,
+ CFDictionaryValueCallBacks
+ ]
+ CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
+
+ CoreFoundation.CFDictionaryGetValue.argtypes = [
+ CFDictionaryRef,
+ CFTypeRef
+ ]
+ CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
+
+ CoreFoundation.CFArrayCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreate.restype = CFArrayRef
+
+ CoreFoundation.CFArrayCreateMutable.argtypes = [
+ CFAllocatorRef,
+ CFIndex,
+ CFArrayCallBacks
+ ]
+ CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
+
+ CoreFoundation.CFArrayAppendValue.argtypes = [
+ CFMutableArrayRef,
+ c_void_p
+ ]
+ CoreFoundation.CFArrayAppendValue.restype = None
+
+ CoreFoundation.CFArrayGetCount.argtypes = [
+ CFArrayRef
+ ]
+ CoreFoundation.CFArrayGetCount.restype = CFIndex
+
+ CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
+ CFArrayRef,
+ CFIndex
+ ]
+ CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
+
+ CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
+ CoreFoundation, 'kCFAllocatorDefault'
+ )
+ CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
+ )
+ CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
+ )
+
+ CoreFoundation.CFTypeRef = CFTypeRef
+ CoreFoundation.CFArrayRef = CFArrayRef
+ CoreFoundation.CFStringRef = CFStringRef
+ CoreFoundation.CFDictionaryRef = CFDictionaryRef
+
+except AttributeError:
+ raise ImportError('Error initializing ctypes')
+
+
+class CFConst(object):
+ """
+ A class object that acts as essentially a namespace for CoreFoundation
+ constants.
+ """
+ kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
+
+
+class SecurityConst(object):
+ """
+ A class object that acts as essentially a namespace for Security constants.
+ """
+ kSSLSessionOptionBreakOnServerAuth = 0
+
+ kSSLProtocol2 = 1
+ kSSLProtocol3 = 2
+ kTLSProtocol1 = 4
+ kTLSProtocol11 = 7
+ kTLSProtocol12 = 8
+
+ kSSLClientSide = 1
+ kSSLStreamType = 0
+
+ kSecFormatPEMSequence = 10
+
+ kSecTrustResultInvalid = 0
+ kSecTrustResultProceed = 1
+ # This gap is present on purpose: this was kSecTrustResultConfirm, which
+ # is deprecated.
+ kSecTrustResultDeny = 3
+ kSecTrustResultUnspecified = 4
+ kSecTrustResultRecoverableTrustFailure = 5
+ kSecTrustResultFatalTrustFailure = 6
+ kSecTrustResultOtherError = 7
+
+ errSSLProtocol = -9800
+ errSSLWouldBlock = -9803
+ errSSLClosedGraceful = -9805
+ errSSLClosedNoNotify = -9816
+ errSSLClosedAbort = -9806
+
+ errSSLXCertChainInvalid = -9807
+ errSSLCrypto = -9809
+ errSSLInternal = -9810
+ errSSLCertExpired = -9814
+ errSSLCertNotYetValid = -9815
+ errSSLUnknownRootCert = -9812
+ errSSLNoRootCert = -9813
+ errSSLHostNameMismatch = -9843
+ errSSLPeerHandshakeFail = -9824
+ errSSLPeerUserCancelled = -9839
+ errSSLWeakPeerEphemeralDHKey = -9850
+ errSSLServerAuthCompleted = -9841
+ errSSLRecordOverflow = -9847
+
+ errSecVerifyFailed = -67808
+ errSecNoTrustSettings = -25263
+ errSecItemNotFound = -25300
+ errSecInvalidTrustSettings = -25262
+
+ # Cipher suites. We only pick the ones our default cipher string allows.
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
+ TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
+ TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
+ TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
+ TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
+ TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
+ TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
+ TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
+ TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
+ TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+ TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
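+
+
+# A minimal usage sketch of the bindings above (not part of upstream
+# urllib3): create a client-side TLS context and set its peer name.
+#
+#     ctx = Security.SSLCreateContext(None, SecurityConst.kSSLClientSide,
+#                                     SecurityConst.kSSLStreamType)
+#     status = Security.SSLSetPeerDomainName(ctx, b'example.com',
+#                                            len(b'example.com'))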
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
new file mode 100644
index 0000000..0f79a13
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
@@ -0,0 +1,344 @@
+# SPDX-License-Identifier: MIT
+"""
+Low-level helpers for the SecureTransport bindings.
+
+These are Python functions that are not directly related to the high-level APIs
+but are necessary to get them to work. They include a whole bunch of low-level
+CoreFoundation messing about and memory management. The concerns in this module
+are almost entirely about trying to avoid memory leaks and providing
+appropriate and useful assistance to the higher-level code.
+"""
+import base64
+import ctypes
+import itertools
+import re
+import os
+import ssl
+import tempfile
+
+from .bindings import Security, CoreFoundation, CFConst
+
+
+# This regular expression is used to grab PEM data out of a PEM bundle.
+_PEM_CERTS_RE = re.compile(
+ b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
+)
+
+
+def _cf_data_from_bytes(bytestring):
+ """
+ Given a bytestring, create a CFData object from it. This CFData object must
+ be CFReleased by the caller.
+ """
+ return CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
+ )
+
+
+def _cf_dictionary_from_tuples(tuples):
+ """
+ Given a list of Python tuples, create an associated CFDictionary.
+ """
+ dictionary_size = len(tuples)
+
+ # We need to get the dictionary keys and values out in the same order.
+ keys = (t[0] for t in tuples)
+ values = (t[1] for t in tuples)
+ cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
+ cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
+
+ return CoreFoundation.CFDictionaryCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ cf_keys,
+ cf_values,
+ dictionary_size,
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks,
+ CoreFoundation.kCFTypeDictionaryValueCallBacks,
+ )
+
+
+def _cf_string_to_unicode(value):
+ """
+ Creates a Unicode string from a CFString object. Used entirely for error
+ reporting.
+
+ Yes, it annoys me quite a lot that this function is this complex.
+ """
+ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
+
+ string = CoreFoundation.CFStringGetCStringPtr(
+ value_as_void_p,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if string is None:
+ buffer = ctypes.create_string_buffer(1024)
+ result = CoreFoundation.CFStringGetCString(
+ value_as_void_p,
+ buffer,
+ 1024,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if not result:
+ raise OSError('Error copying C string from CFStringRef')
+ string = buffer.value
+ if string is not None:
+ string = string.decode('utf-8')
+ return string
+
+
+def _assert_no_error(error, exception_class=None):
+ """
+ Checks the return code and raises an exception if there is an error to
+ report.
+ """
+ if error == 0:
+ return
+
+ cf_error_string = Security.SecCopyErrorMessageString(error, None)
+ output = _cf_string_to_unicode(cf_error_string)
+ CoreFoundation.CFRelease(cf_error_string)
+
+ if output is None or output == u'':
+ output = u'OSStatus %s' % error
+
+ if exception_class is None:
+ exception_class = ssl.SSLError
+
+ raise exception_class(output)
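+
+
+# Typical call-site sketch (not part of upstream urllib3): every
+# OSStatus-returning Security call is checked, e.g.
+#
+#     status = Security.SSLHandshake(context)
+#     _assert_no_error(status)  # raises ssl.SSLError on a non-zero status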
+
+
+def _cert_array_from_pem(pem_bundle):
+ """
+ Given a bundle of certs in PEM format, turns them into a CFArray of certs
+ that can be used to validate a cert chain.
+ """
+ der_certs = [
+ base64.b64decode(match.group(1))
+ for match in _PEM_CERTS_RE.finditer(pem_bundle)
+ ]
+ if not der_certs:
+ raise ssl.SSLError("No root certificates specified")
+
+ cert_array = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
+ )
+ if not cert_array:
+ raise ssl.SSLError("Unable to allocate memory!")
+
+ try:
+ for der_bytes in der_certs:
+ certdata = _cf_data_from_bytes(der_bytes)
+ if not certdata:
+ raise ssl.SSLError("Unable to allocate memory!")
+ cert = Security.SecCertificateCreateWithData(
+ CoreFoundation.kCFAllocatorDefault, certdata
+ )
+ CoreFoundation.CFRelease(certdata)
+ if not cert:
+ raise ssl.SSLError("Unable to build cert object!")
+
+ CoreFoundation.CFArrayAppendValue(cert_array, cert)
+ CoreFoundation.CFRelease(cert)
+ except Exception:
+ # We need to free the array before the exception bubbles further.
+ # We only want to do that if an error occurs: otherwise, the caller
+ # should free.
+ CoreFoundation.CFRelease(cert_array)
+ raise
+
+ return cert_array
+
+
+def _is_cert(item):
+ """
+ Returns True if a given CFTypeRef is a certificate.
+ """
+ expected = Security.SecCertificateGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+ """
+ Returns True if a given CFTypeRef is an identity.
+ """
+ expected = Security.SecIdentityGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+ """
+ This function creates a temporary Mac keychain that we can use to work with
+ credentials. This keychain uses a one-time password and a temporary file to
+ store the data. We expect to have one keychain per socket. The returned
+ SecKeychainRef must be freed by the caller, including calling
+ SecKeychainDelete.
+
+ Returns a tuple of the SecKeychainRef and the path to the temporary
+ directory that contains it.
+ """
+ # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+ # means we cannot use mkstemp to use a generic temporary file. Instead,
+ # we're going to create a temporary directory and a filename to use there.
+ # This filename will be 8 random bytes expanded into base64. We also need
+ # some random bytes to password-protect the keychain we're creating, so we
+ # ask for 40 random bytes.
+ random_bytes = os.urandom(40)
+ filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
+ password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
+ tempdirectory = tempfile.mkdtemp()
+
+ keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
+
+ # We now want to create the keychain itself.
+ keychain = Security.SecKeychainRef()
+ status = Security.SecKeychainCreate(
+ keychain_path,
+ len(password),
+ password,
+ False,
+ None,
+ ctypes.byref(keychain)
+ )
+ _assert_no_error(status)
+
+ # Having created the keychain, we want to pass it off to the caller.
+ return keychain, tempdirectory
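+
+
+# Cleanup sketch (not part of upstream urllib3): callers are expected to
+# delete the keychain and remove the temporary directory when done.
+#
+#     import shutil
+#     keychain, tempdir = _temporary_keychain()
+#     try:
+#         pass  # e.g. _load_client_cert_chain(keychain, cert_path)
+#     finally:
+#         Security.SecKeychainDelete(keychain)
+#         CoreFoundation.CFRelease(keychain)
+#         shutil.rmtree(tempdir)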
+
+
+def _load_items_from_file(keychain, path):
+ """
+ Given a single file, loads all the trust objects from it into arrays and
+ the keychain.
+ Returns a tuple of lists: the first list is a list of identities, the
+ second a list of certs.
+ """
+ certificates = []
+ identities = []
+ result_array = None
+
+ with open(path, 'rb') as f:
+ raw_filedata = f.read()
+
+ try:
+ filedata = CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ raw_filedata,
+ len(raw_filedata)
+ )
+ result_array = CoreFoundation.CFArrayRef()
+ result = Security.SecItemImport(
+ filedata, # cert data
+ None, # Filename, leaving it out for now
+ None, # What the type of the file is, we don't care
+ None, # what's in the file, we don't care
+ 0, # import flags
+ None, # key params, can include passphrase in the future
+ keychain, # The keychain to insert into
+ ctypes.byref(result_array) # Results
+ )
+ _assert_no_error(result)
+
+ # A CFArray is not very useful to us as an intermediary
+ # representation, so we are going to extract the objects we want
+ # and then free the array. We don't need to keep hold of keys: the
+ # keychain already has them!
+ result_count = CoreFoundation.CFArrayGetCount(result_array)
+ for index in range(result_count):
+ item = CoreFoundation.CFArrayGetValueAtIndex(
+ result_array, index
+ )
+ item = ctypes.cast(item, CoreFoundation.CFTypeRef)
+
+ if _is_cert(item):
+ CoreFoundation.CFRetain(item)
+ certificates.append(item)
+ elif _is_identity(item):
+ CoreFoundation.CFRetain(item)
+ identities.append(item)
+ finally:
+ if result_array:
+ CoreFoundation.CFRelease(result_array)
+
+ CoreFoundation.CFRelease(filedata)
+
+ return (identities, certificates)
+
+
+def _load_client_cert_chain(keychain, *paths):
+ """
+ Load certificates and maybe keys from a number of files. Has the end goal
+ of returning a CFArray containing one SecIdentityRef, and then zero or more
+ SecCertificateRef objects, suitable for use as a client certificate trust
+ chain.
+ """
+ # Ok, the strategy.
+ #
+ # This relies on knowing that macOS will not give you a SecIdentityRef
+ # unless you have imported a key into a keychain. This is a somewhat
+ # artificial limitation of macOS (for example, it doesn't necessarily
+ # affect iOS), but there is nothing inside Security.framework that lets you
+ # get a SecIdentityRef without having a key in a keychain.
+ #
+ # So the policy here is we take all the files and iterate them in order.
+ # Each one will use SecItemImport to have one or more objects loaded from
+ # it. We will also point at a keychain that macOS can use to work with the
+ # private key.
+ #
+ # Once we have all the objects, we'll check what we actually have. If we
+ # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
+ # we'll take the first certificate (which we assume to be our leaf) and
+ # ask the keychain to give us a SecIdentityRef with that cert's associated
+ # key.
+ #
+ # We'll then return a CFArray containing the trust chain: one
+ # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
+ # responsibility for freeing this CFArray will be with the caller. This
+ # CFArray must remain alive for the entire connection, so in practice it
+ # will be stored with a single SSLSocket, along with the reference to the
+ # keychain.
+ certificates = []
+ identities = []
+
+ # Filter out bad paths.
+ paths = (path for path in paths if path)
+
+ try:
+ for file_path in paths:
+ new_identities, new_certs = _load_items_from_file(
+ keychain, file_path
+ )
+ identities.extend(new_identities)
+ certificates.extend(new_certs)
+
+ # Ok, we have everything. The question is: do we have an identity? If
+ # not, we want to grab one from the first cert we have.
+ if not identities:
+ new_identity = Security.SecIdentityRef()
+ status = Security.SecIdentityCreateWithCertificate(
+ keychain,
+ certificates[0],
+ ctypes.byref(new_identity)
+ )
+ _assert_no_error(status)
+ identities.append(new_identity)
+
+ # We now want to release the original certificate, as we no longer
+ # need it.
+ CoreFoundation.CFRelease(certificates.pop(0))
+
+ # We now need to build a new CFArray that holds the trust chain.
+ trust_chain = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ for item in itertools.chain(identities, certificates):
+ # ArrayAppendValue does a CFRetain on the item. That's fine,
+ # because the finally block will release our other refs to them.
+ CoreFoundation.CFArrayAppendValue(trust_chain, item)
+
+ return trust_chain
+ finally:
+ for obj in itertools.chain(identities, certificates):
+ CoreFoundation.CFRelease(obj)
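+
+
+# Hedged usage sketch (file paths are placeholders): pairing the helpers in
+# this module, with the caller responsible for releasing the returned array:
+#
+#   keychain, tempdir = _temporary_keychain()
+#   chain = _load_client_cert_chain(keychain, 'client.pem', 'client.key')
+#   ...  # keep `chain` alive for the lifetime of the connection
+#   CoreFoundation.CFRelease(chain)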
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
new file mode 100644
index 0000000..e74589f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
@@ -0,0 +1,297 @@
+# SPDX-License-Identifier: MIT
+"""
+This module provides a pool manager that uses Google App Engine's
+`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+Example usage::
+
+ from urllib3 import PoolManager
+ from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
+
+ if is_appengine_sandbox():
+ # AppEngineManager uses AppEngine's URLFetch API behind the scenes
+ http = AppEngineManager()
+ else:
+ # PoolManager uses a socket-level API behind the scenes
+ http = PoolManager()
+
+ r = http.request('GET', 'https://google.com/')
+
+There are `limitations <https://cloud.google.com/appengine/docs/python/\
+urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
+the best choice for your application. There are three options for using
+urllib3 on Google App Engine:
+
+1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
+ cost-effective in many circumstances as long as your usage is within the
+ limitations.
+2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
+ Sockets also have `limitations and restrictions
+ <https://cloud.google.com/appengine/docs/python/sockets/\
+ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
+ To use sockets, be sure to specify the following in your ``app.yaml``::
+
+ env_variables:
+ GAE_USE_SOCKETS_HTTPLIB : 'true'
+
+3. If you are using `App Engine Flexible
+<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
+:class:`PoolManager` without any configuration or special environment variables.
+"""
+
+from __future__ import absolute_import
+import logging
+import os
+import warnings
+from ..packages.six.moves.urllib.parse import urljoin
+
+from ..exceptions import (
+ HTTPError,
+ HTTPWarning,
+ MaxRetryError,
+ ProtocolError,
+ TimeoutError,
+ SSLError
+)
+
+from ..packages.six import BytesIO
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.timeout import Timeout
+from ..util.retry import Retry
+
+try:
+ from google.appengine.api import urlfetch
+except ImportError:
+ urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+ pass
+
+
+class AppEnginePlatformError(HTTPError):
+ pass
+
+
+class AppEngineManager(RequestMethods):
+ """
+ Connection manager for Google App Engine sandbox applications.
+
+ This manager uses the URLFetch service directly instead of using the
+ emulated httplib, and is subject to URLFetch limitations as described in
+ the App Engine documentation `here
+ <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+    Notably it will raise an :class:`AppEnginePlatformError` if:
+        * URLFetch is not available.
+        * You attempt to use this on App Engine Flexible, as full socket
+          support is available there.
+        * A request is larger than 10 megabytes.
+        * A response is larger than 32 megabytes.
+        * You use an unsupported request method such as OPTIONS.
+
+ Beyond those cases, it will raise normal urllib3 errors.
+ """
+
+ def __init__(self, headers=None, retries=None, validate_certificate=True,
+ urlfetch_retries=True):
+ if not urlfetch:
+ raise AppEnginePlatformError(
+ "URLFetch is not available in this environment.")
+
+ if is_prod_appengine_mvms():
+ raise AppEnginePlatformError(
+ "Use normal urllib3.PoolManager instead of AppEngineManager"
+ "on Managed VMs, as using URLFetch is not necessary in "
+ "this environment.")
+
+ warnings.warn(
+ "urllib3 is using URLFetch on Google App Engine sandbox instead "
+ "of sockets. To use sockets directly instead of URLFetch see "
+ "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
+ AppEnginePlatformWarning)
+
+ RequestMethods.__init__(self, headers)
+ self.validate_certificate = validate_certificate
+ self.urlfetch_retries = urlfetch_retries
+
+ self.retries = retries or Retry.DEFAULT
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def urlopen(self, method, url, body=None, headers=None,
+ retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
+ **response_kw):
+
+ retries = self._get_retries(retries, redirect)
+
+ try:
+ follow_redirects = (
+ redirect and
+ retries.redirect != 0 and
+ retries.total)
+ response = urlfetch.fetch(
+ url,
+ payload=body,
+ method=method,
+ headers=headers or {},
+ allow_truncated=False,
+ follow_redirects=self.urlfetch_retries and follow_redirects,
+ deadline=self._get_absolute_timeout(timeout),
+ validate_certificate=self.validate_certificate,
+ )
+ except urlfetch.DeadlineExceededError as e:
+ raise TimeoutError(self, e)
+
+ except urlfetch.InvalidURLError as e:
+ if 'too large' in str(e):
+ raise AppEnginePlatformError(
+ "URLFetch request too large, URLFetch only "
+ "supports requests up to 10mb in size.", e)
+ raise ProtocolError(e)
+
+ except urlfetch.DownloadError as e:
+ if 'Too many redirects' in str(e):
+ raise MaxRetryError(self, url, reason=e)
+ raise ProtocolError(e)
+
+ except urlfetch.ResponseTooLargeError as e:
+ raise AppEnginePlatformError(
+ "URLFetch response too large, URLFetch only supports"
+ "responses up to 32mb in size.", e)
+
+ except urlfetch.SSLCertificateError as e:
+ raise SSLError(e)
+
+ except urlfetch.InvalidMethodError as e:
+ raise AppEnginePlatformError(
+ "URLFetch does not support method: %s" % method, e)
+
+ http_response = self._urlfetch_response_to_http_response(
+ response, retries=retries, **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and http_response.get_redirect_location()
+ if redirect_location:
+ # Check for redirect response
+            if self.urlfetch_retries and retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ else:
+ if http_response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=http_response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ return http_response
+
+ retries.sleep_for_retry(http_response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ redirect_url = urljoin(url, redirect_location)
+ return self.urlopen(
+ method, redirect_url, body, headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(http_response.getheader('Retry-After'))
+ if retries.is_retry(method, http_response.status, has_retry_after):
+ retries = retries.increment(
+ method, url, response=http_response, _pool=self)
+ log.debug("Retry: %s", url)
+ retries.sleep(http_response)
+ return self.urlopen(
+ method, url,
+ body=body, headers=headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ return http_response
+
+ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+ if is_prod_appengine():
+ # Production GAE handles deflate encoding automatically, but does
+ # not remove the encoding header.
+ content_encoding = urlfetch_resp.headers.get('content-encoding')
+
+ if content_encoding == 'deflate':
+ del urlfetch_resp.headers['content-encoding']
+
+ transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
+ # We have a full response's content,
+ # so let's make sure we don't report ourselves as chunked data.
+ if transfer_encoding == 'chunked':
+ encodings = transfer_encoding.split(",")
+ encodings.remove('chunked')
+ urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
+
+ return HTTPResponse(
+ # In order for decoding to work, we must present the content as
+ # a file-like object.
+ body=BytesIO(urlfetch_resp.content),
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ **response_kw
+ )
+
+ def _get_absolute_timeout(self, timeout):
+ if timeout is Timeout.DEFAULT_TIMEOUT:
+ return None # Defer to URLFetch's default.
+ if isinstance(timeout, Timeout):
+ if timeout._read is not None or timeout._connect is not None:
+ warnings.warn(
+ "URLFetch does not support granular timeout settings, "
+ "reverting to total or default URLFetch timeout.",
+ AppEnginePlatformWarning)
+ return timeout.total
+ return timeout
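+
+    # Illustrative behaviour (values are examples): a granular Timeout is
+    # collapsed to its total with an AppEnginePlatformWarning, while the
+    # sentinel defers to URLFetch's own default:
+    #
+    #   self._get_absolute_timeout(Timeout(total=10, read=5))  # warns, -> 10
+    #   self._get_absolute_timeout(Timeout.DEFAULT_TIMEOUT)    # -> None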
+
+ def _get_retries(self, retries, redirect):
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(
+ retries, redirect=redirect, default=self.retries)
+
+ if retries.connect or retries.read or retries.redirect:
+ warnings.warn(
+ "URLFetch only supports total retries and does not "
+ "recognize connect, read, or redirect retry parameters.",
+ AppEnginePlatformWarning)
+
+ return retries
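+
+# A hedged construction sketch (only valid inside the App Engine sandbox; the
+# retry count is an example value). URLFetch only honours total retries, so
+# granular connect/read/redirect settings trigger AppEnginePlatformWarning:
+#
+#   from urllib3.util.retry import Retry
+#   http = AppEngineManager(retries=Retry(total=3))
+#   r = http.request('GET', 'https://example.com/')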
+
+
+def is_appengine():
+ return (is_local_appengine() or
+ is_prod_appengine() or
+ is_prod_appengine_mvms())
+
+
+def is_appengine_sandbox():
+ return is_appengine() and not is_prod_appengine_mvms()
+
+
+def is_local_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Development/' in os.environ['SERVER_SOFTWARE'])
+
+
+def is_prod_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
+ not is_prod_appengine_mvms())
+
+
+def is_prod_appengine_mvms():
+ return os.environ.get('GAE_VM', False) == 'true'
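+
+
+# Illustrative only: the helpers above key off environment variables that App
+# Engine itself sets. Under the local development server one would see
+# roughly:
+#
+#   os.environ['APPENGINE_RUNTIME']  # e.g. 'python27'
+#   os.environ['SERVER_SOFTWARE']    # e.g. 'Development/2.0'
+#   is_local_appengine()     # -> True
+#   is_appengine_sandbox()   # -> True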
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
new file mode 100644
index 0000000..3f8c9eb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: MIT
+"""
+NTLM authenticating pool, contributed by erikcederstran
+
+Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
+"""
+from __future__ import absolute_import
+
+from logging import getLogger
+from ntlm import ntlm
+
+from .. import HTTPSConnectionPool
+from ..packages.six.moves.http_client import HTTPSConnection
+
+
+log = getLogger(__name__)
+
+
+class NTLMConnectionPool(HTTPSConnectionPool):
+ """
+ Implements an NTLM authentication version of an urllib3 connection pool
+ """
+
+ scheme = 'https'
+
+ def __init__(self, user, pw, authurl, *args, **kwargs):
+ """
+ authurl is a random URL on the server that is protected by NTLM.
+ user is the Windows user, probably in the DOMAIN\\username format.
+ pw is the password for the user.
+ """
+ super(NTLMConnectionPool, self).__init__(*args, **kwargs)
+ self.authurl = authurl
+ self.rawuser = user
+ user_parts = user.split('\\', 1)
+ self.domain = user_parts[0].upper()
+ self.user = user_parts[1]
+ self.pw = pw
+
+ def _new_conn(self):
+ # Performs the NTLM handshake that secures the connection. The socket
+ # must be kept open while requests are performed.
+ self.num_connections += 1
+ log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
+ self.num_connections, self.host, self.authurl)
+
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ req_header = 'Authorization'
+ resp_header = 'www-authenticate'
+
+ conn = HTTPSConnection(host=self.host, port=self.port)
+
+ # Send negotiation message
+ headers[req_header] = (
+ 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ reshdr = dict(res.getheaders())
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', reshdr)
+ log.debug('Response data: %s [...]', res.read(100))
+
+ # Remove the reference to the socket, so that it can not be closed by
+ # the response object (we want to keep the socket open)
+ res.fp = None
+
+ # Server should respond with a challenge message
+ auth_header_values = reshdr[resp_header].split(', ')
+ auth_header_value = None
+ for s in auth_header_values:
+ if s[:5] == 'NTLM ':
+ auth_header_value = s[5:]
+ if auth_header_value is None:
+ raise Exception('Unexpected %s response header: %s' %
+ (resp_header, reshdr[resp_header]))
+
+ # Send authentication message
+ ServerChallenge, NegotiateFlags = \
+ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
+ auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
+ self.user,
+ self.domain,
+ self.pw,
+ NegotiateFlags)
+ headers[req_header] = 'NTLM %s' % auth_msg
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', dict(res.getheaders()))
+ log.debug('Response data: %s [...]', res.read()[:100])
+ if res.status != 200:
+ if res.status == 401:
+ raise Exception('Server rejected request: wrong '
+ 'username or password')
+ raise Exception('Wrong server response: %s %s' %
+ (res.status, res.reason))
+
+ res.fp = None
+ log.debug('Connection established')
+ return conn
+
+ def urlopen(self, method, url, body=None, headers=None, retries=3,
+ redirect=True, assert_same_host=True):
+ if headers is None:
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ return super(NTLMConnectionPool, self).urlopen(method, url, body,
+ headers, retries,
+ redirect,
+ assert_same_host)
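+
+
+# Hedged usage sketch (host, credentials, and URL are placeholders):
+#
+#   pool = NTLMConnectionPool('DOMAIN\\user', 'secret', '/protected',
+#                             host='server.example.com', port=443)
+#   r = pool.urlopen('GET', '/protected')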
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
new file mode 100644
index 0000000..8d37350
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
@@ -0,0 +1,458 @@
+# SPDX-License-Identifier: MIT
+"""
+SSL with SNI_-support for Python 2. Follow these instructions if you would
+like to verify SSL certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
+
+This needs the following packages installed:
+
+* pyOpenSSL (tested with 16.0.0)
+* cryptography (minimum 1.3.4, from pyopenssl)
+* idna (minimum 2.0, from cryptography)
+
+However, pyopenssl depends on cryptography, which depends on idna, so while we
+use all three directly here we end up having relatively few packages required.
+
+You can install them with the following command:
+
+ pip install pyopenssl cryptography idna
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this::
+
+ try:
+ import urllib3.contrib.pyopenssl
+ urllib3.contrib.pyopenssl.inject_into_urllib3()
+ except ImportError:
+ pass
+
+Now you can use :mod:`urllib3` as you normally would, and it will support SNI
+when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+If you want to configure the default list of supported cipher suites, you can
+set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+"""
+from __future__ import absolute_import
+
+import OpenSSL.SSL
+from cryptography import x509
+from cryptography.hazmat.backends.openssl import backend as openssl_backend
+from cryptography.hazmat.backends.openssl.x509 import _Certificate
+
+from socket import timeout, error as SocketError
+from io import BytesIO
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+import logging
+import ssl
+
+try:
+ import six
+except ImportError:
+ from ..packages import six
+
+import sys
+
+from .. import util
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works.
+HAS_SNI = True
+
+# Map from urllib3 to PyOpenSSL compatible parameter-values.
+_openssl_versions = {
+ ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
+ ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+}
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
+try:
+ _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
+except AttributeError:
+ pass
+
+_stdlib_to_openssl_verify = {
+ ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
+ ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
+ ssl.CERT_REQUIRED:
+ OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+}
+_openssl_to_stdlib_verify = dict(
+ (v, k) for k, v in _stdlib_to_openssl_verify.items()
+)
+
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+
+log = logging.getLogger(__name__)
+
+
+def inject_into_urllib3():
+ 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
+
+ _validate_dependencies_met()
+
+ util.ssl_.SSLContext = PyOpenSSLContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_PYOPENSSL = True
+ util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+ 'Undo monkey-patching by :func:`inject_into_urllib3`.'
+
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_PYOPENSSL = False
+ util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+ """
+ Verifies that PyOpenSSL's package-level dependencies have been met.
+ Throws `ImportError` if they are not met.
+ """
+ # Method added in `cryptography==1.1`; not available in older versions
+ from cryptography.x509.extensions import Extensions
+ if getattr(Extensions, "get_extension_for_class", None) is None:
+ raise ImportError("'cryptography' module missing required functionality. "
+ "Try upgrading to v1.3.4 or newer.")
+
+ # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+ # attribute is only present on those versions.
+ from OpenSSL.crypto import X509
+ x509 = X509()
+ if getattr(x509, "_x509", None) is None:
+ raise ImportError("'pyOpenSSL' module missing required functionality. "
+ "Try upgrading to v0.14 or newer.")
+
+
+def _dnsname_to_stdlib(name):
+ """
+ Converts a dNSName SubjectAlternativeName field to the form used by the
+ standard library on the given Python version.
+
+ Cryptography produces a dNSName as a unicode string that was idna-decoded
+ from ASCII bytes. We need to idna-encode that string to get it back, and
+ then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+ uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
+ """
+ def idna_encode(name):
+ """
+ Borrowed wholesale from the Python Cryptography Project. It turns out
+ that we can't just safely call `idna.encode`: it can explode for
+ wildcard names. This avoids that problem.
+ """
+ import idna
+
+ for prefix in [u'*.', u'.']:
+ if name.startswith(prefix):
+ name = name[len(prefix):]
+ return prefix.encode('ascii') + idna.encode(name)
+ return idna.encode(name)
+
+ name = idna_encode(name)
+ if sys.version_info >= (3, 0):
+ name = name.decode('utf-8')
+ return name
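+
+# Worked example (illustrative): the wildcard prefix is stripped before
+# idna-encoding and re-attached afterwards, so on Python 3:
+#
+#   _dnsname_to_stdlib(u'*.example.com')   # -> '*.example.com'
+#   _dnsname_to_stdlib(u'*.b\xfccher.de')  # -> '*.xn--bcher-kva.de'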
+
+
+def get_subj_alt_name(peer_cert):
+ """
+    Given a PyOpenSSL certificate, provides all the subject alternative names.
+ """
+ # Pass the cert to cryptography, which has much better APIs for this.
+ # This is technically using private APIs, but should work across all
+ # relevant versions until PyOpenSSL gets something proper for this.
+ cert = _Certificate(openssl_backend, peer_cert._x509)
+
+ # We want to find the SAN extension. Ask Cryptography to locate it (it's
+ # faster than looping in Python)
+ try:
+ ext = cert.extensions.get_extension_for_class(
+ x509.SubjectAlternativeName
+ ).value
+ except x509.ExtensionNotFound:
+ # No such extension, return the empty list.
+ return []
+ except (x509.DuplicateExtension, x509.UnsupportedExtension,
+ x509.UnsupportedGeneralNameType, UnicodeError) as e:
+ # A problem has been found with the quality of the certificate. Assume
+ # no SAN field is present.
+ log.warning(
+ "A problem was encountered with the certificate that prevented "
+ "urllib3 from finding the SubjectAlternativeName field. This can "
+ "affect certificate validation. The error was %s",
+ e,
+ )
+ return []
+
+ # We want to return dNSName and iPAddress fields. We need to cast the IPs
+ # back to strings because the match_hostname function wants them as
+ # strings.
+ # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
+ # decoded. This is pretty frustrating, but that's what the standard library
+ # does with certificates, and so we need to attempt to do the same.
+ names = [
+ ('DNS', _dnsname_to_stdlib(name))
+ for name in ext.get_values_for_type(x509.DNSName)
+ ]
+ names.extend(
+ ('IP Address', str(name))
+ for name in ext.get_values_for_type(x509.IPAddress)
+ )
+
+ return names
+
+
+class WrappedSocket(object):
+    '''API-compatibility wrapper for PyOpenSSL's Connection class.
+
+    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+    collector of PyPy.
+ '''
+
+ def __init__(self, connection, socket, suppress_ragged_eofs=True):
+ self.connection = connection
+ self.socket = socket
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+ self._closed = False
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, *args, **kwargs):
+ try:
+ data = self.connection.recv(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return b''
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return b''
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv(*args, **kwargs)
+ else:
+ return data
+
+ def recv_into(self, *args, **kwargs):
+ try:
+ return self.connection.recv_into(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return 0
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return 0
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv_into(*args, **kwargs)
+
+ def settimeout(self, timeout):
+ return self.socket.settimeout(timeout)
+
+ def _send_until_done(self, data):
+ while True:
+ try:
+ return self.connection.send(data)
+ except OpenSSL.SSL.WantWriteError:
+ wr = util.wait_for_write(self.socket, self.socket.gettimeout())
+ if not wr:
+ raise timeout()
+ continue
+ except OpenSSL.SSL.SysCallError as e:
+ raise SocketError(str(e))
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ # FIXME rethrow compatible exceptions should we ever use this
+ self.connection.shutdown()
+
+ def close(self):
+ if self._makefile_refs < 1:
+ try:
+ self._closed = True
+ return self.connection.close()
+ except OpenSSL.SSL.Error:
+ return
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ x509 = self.connection.get_peer_certificate()
+
+ if not x509:
+ return x509
+
+ if binary_form:
+ return OpenSSL.crypto.dump_certificate(
+ OpenSSL.crypto.FILETYPE_ASN1,
+ x509)
+
+ return {
+ 'subject': (
+ (('commonName', x509.get_subject().CN),),
+ ),
+ 'subjectAltName': get_subj_alt_name(x509)
+ }
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+ """
+ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+ for translating the interface of the standard library ``SSLContext`` object
+ to calls into PyOpenSSL.
+ """
+ def __init__(self, protocol):
+ self.protocol = _openssl_versions[protocol]
+ self._ctx = OpenSSL.SSL.Context(self.protocol)
+ self._options = 0
+ self.check_hostname = False
+
+ @property
+ def options(self):
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ self._options = value
+ self._ctx.set_options(value)
+
+ @property
+ def verify_mode(self):
+ return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._ctx.set_verify(
+ _stdlib_to_openssl_verify[value],
+ _verify_callback
+ )
+
+ def set_default_verify_paths(self):
+ self._ctx.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ if isinstance(ciphers, six.text_type):
+ ciphers = ciphers.encode('utf-8')
+ self._ctx.set_cipher_list(ciphers)
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ if cafile is not None:
+ cafile = cafile.encode('utf-8')
+ if capath is not None:
+ capath = capath.encode('utf-8')
+ self._ctx.load_verify_locations(cafile, capath)
+ if cadata is not None:
+ self._ctx.load_verify_locations(BytesIO(cadata))
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._ctx.use_certificate_file(certfile)
+ if password is not None:
+ self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
+ self._ctx.use_privatekey_file(keyfile or certfile)
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+ if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
+ server_hostname = server_hostname.encode('utf-8')
+
+ if server_hostname is not None:
+ cnx.set_tlsext_host_name(server_hostname)
+
+ cnx.set_connect_state()
+
+ while True:
+ try:
+ cnx.do_handshake()
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(sock, sock.gettimeout())
+ if not rd:
+ raise timeout('select timed out')
+ continue
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError('bad handshake: %r' % e)
+ break
+
+ return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+ return err_no == 0
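+
+
+# Round-trip sketch: the module docstring above shows inject_into_urllib3();
+# the monkey-patching is reversible at any point (illustrative):
+#
+#   import urllib3.contrib.pyopenssl
+#   urllib3.contrib.pyopenssl.inject_into_urllib3()
+#   ...  # make SNI-capable requests on Python 2
+#   urllib3.contrib.pyopenssl.extract_from_urllib3()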
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
new file mode 100644
index 0000000..fcc3011
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
@@ -0,0 +1,808 @@
+# SPDX-License-Identifier: MIT
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+ import urllib3.contrib.securetransport
+ urllib3.contrib.securetransport.inject_into_urllib3()
+
+Happy TLSing!
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import threading
+import weakref
+
+from .. import util
+from ._securetransport.bindings import (
+ Security, SecurityConst, CoreFoundation
+)
+from ._securetransport.low_level import (
+ _assert_no_error, _cert_array_from_pem, _temporary_keychain,
+ _load_client_cert_chain
+)
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+try:
+ memoryview(b'')
+except NameError:
+ raise ImportError("SecureTransport only works on Pythons with memoryview")
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+# 1. It is not possible to call into the callbacks before the dictionary is
+# populated, so once in the callback the id must be in the dictionary.
+# 2. The callbacks don't mutate the dictionary, they only read from it, and
+# so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
+]
+
+# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
+# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
+_protocol_to_min_max = {
+ ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv2"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
+ SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
+ )
+if hasattr(ssl, "PROTOCOL_SSLv3"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
+ SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
+ SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
+ SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_2"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
+ SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
+ )
+if hasattr(ssl, "PROTOCOL_TLS"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
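+
+# For example (illustrative lookup): PROTOCOL_SSLv23 may negotiate anywhere
+# in the TLSv1..TLSv1.2 window, while the pinned entries fix both ends:
+#
+#   _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
+#   # -> (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12)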
+
+
+def inject_into_urllib3():
+ """
+ Monkey-patch urllib3 with SecureTransport-backed SSL-support.
+ """
+ util.ssl_.SSLContext = SecureTransportContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_SECURETRANSPORT = True
+ util.ssl_.IS_SECURETRANSPORT = True
+
+
+def extract_from_urllib3():
+ """
+ Undo monkey-patching by :func:`inject_into_urllib3`.
+ """
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_SECURETRANSPORT = False
+ util.ssl_.IS_SECURETRANSPORT = False
+
+
+def _read_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport read callback. This is called by ST to request that data
+ be returned from the socket.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ requested_length = data_length_pointer[0]
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ read_count = 0
+ buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
+ buffer_view = memoryview(buffer)
+
+ try:
+ while read_count < requested_length:
+ if timeout is None or timeout >= 0:
+ readables = util.wait_for_read([base_socket], timeout)
+ if not readables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+
+ # We need to tell ctypes that we have a buffer that can be
+ # written to. Upsettingly, we do that like this:
+ chunk_size = base_socket.recv_into(
+ buffer_view[read_count:requested_length]
+ )
+ read_count += chunk_size
+ if not chunk_size:
+ if not read_count:
+ return SecurityConst.errSSLClosedGraceful
+ break
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = read_count
+
+ if read_count != requested_length:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport write callback. This is called by ST to request that data
+ actually be sent on the network.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ bytes_to_write = data_length_pointer[0]
+ data = ctypes.string_at(data_buffer, bytes_to_write)
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ sent = 0
+
+ try:
+ while sent < bytes_to_write:
+ if timeout is None or timeout >= 0:
+ writables = util.wait_for_write([base_socket], timeout)
+ if not writables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+ chunk_sent = base_socket.send(data)
+ sent += chunk_sent
+
+ # This has some needless copying here, but I'm not sure there's
+ # much value in optimising this data path.
+ data = data[chunk_sent:]
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = sent
+ if sent != bytes_to_write:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+# We need to keep these two object references alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+ """
+ API-compatibility wrapper for Python's OpenSSL wrapped socket object.
+
+ Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+ collector of PyPy.
+ """
+ def __init__(self, socket):
+ self.socket = socket
+ self.context = None
+ self._makefile_refs = 0
+ self._closed = False
+ self._exception = None
+ self._keychain = None
+ self._keychain_dir = None
+ self._client_cert_chain = None
+
+ # We save off the previously-configured timeout and then set it to
+ # zero. This is done because we use select and friends to handle the
+ # timeouts, but if we leave the timeout set on the lower socket then
+ # Python will "kindly" call select on that socket again for us. Avoid
+ # that by forcing the timeout to zero.
+ self._timeout = self.socket.gettimeout()
+ self.socket.settimeout(0)
+
+ @contextlib.contextmanager
+ def _raise_on_error(self):
+ """
+ A context manager that can be used to wrap calls that do I/O from
+ SecureTransport. If any of the I/O callbacks hit an exception, this
+ context manager will correctly propagate the exception after the fact.
+ This avoids silently swallowing those exceptions.
+
+ It also correctly forces the socket closed.
+ """
+ self._exception = None
+
+ # We explicitly don't catch around this yield because in the unlikely
+ # event that an exception was hit in the block we don't want to swallow
+ # it.
+ yield
+ if self._exception is not None:
+ exception, self._exception = self._exception, None
+ self.close()
+ raise exception
+
+ def _set_ciphers(self):
+ """
+ Sets up the allowed ciphers. By default this matches the set in
+        util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. The list is
+        hard-coded and cannot be changed at this time, mostly because parsing
+        OpenSSL cipher strings is going to be a freaking nightmare.
+ """
+ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
+ result = Security.SSLSetEnabledCiphers(
+ self.context, ciphers, len(CIPHER_SUITES)
+ )
+ _assert_no_error(result)
+
+ def _custom_validate(self, verify, trust_bundle):
+ """
+ Called when we have set custom validation. We do this in two cases:
+ first, when cert validation is entirely disabled; and second, when
+ using a custom trust DB.
+ """
+ # If we disabled cert validation, just say: cool.
+ if not verify:
+ return
+
+ # We want data in memory, so load it up.
+ if os.path.isfile(trust_bundle):
+ with open(trust_bundle, 'rb') as f:
+ trust_bundle = f.read()
+
+ cert_array = None
+ trust = Security.SecTrustRef()
+
+ try:
+ # Get a CFArray that contains the certs we want.
+ cert_array = _cert_array_from_pem(trust_bundle)
+
+ # Ok, now the hard part. We want to get the SecTrustRef that ST has
+ # created for this connection, shove our CAs into it, tell ST to
+ # ignore everything else it knows, and then ask if it can build a
+ # chain. This is a buuuunch of code.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ raise ssl.SSLError("Failed to copy trust reference")
+
+ result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
+ _assert_no_error(result)
+
+ result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
+ _assert_no_error(result)
+
+ trust_result = Security.SecTrustResultType()
+ result = Security.SecTrustEvaluate(
+ trust, ctypes.byref(trust_result)
+ )
+ _assert_no_error(result)
+ finally:
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+            if cert_array is not None:
+ CoreFoundation.CFRelease(cert_array)
+
+ # Ok, now we can look at what the result was.
+ successes = (
+ SecurityConst.kSecTrustResultUnspecified,
+ SecurityConst.kSecTrustResultProceed
+ )
+ if trust_result.value not in successes:
+ raise ssl.SSLError(
+ "certificate verify failed, error code: %d" %
+ trust_result.value
+ )
+
+ def handshake(self,
+ server_hostname,
+ verify,
+ trust_bundle,
+ min_version,
+ max_version,
+ client_cert,
+ client_key,
+ client_key_passphrase):
+ """
+ Actually performs the TLS handshake. This is run automatically by
+ wrapped socket, and shouldn't be needed in user code.
+ """
+ # First, we do the initial bits of connection setup. We need to create
+ # a context, set its I/O funcs, and set the connection reference.
+ self.context = Security.SSLCreateContext(
+ None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+ )
+ result = Security.SSLSetIOFuncs(
+ self.context, _read_callback_pointer, _write_callback_pointer
+ )
+ _assert_no_error(result)
+
+ # Here we need to compute the handle to use. We do this by taking the
+ # id of self modulo 2**31 - 1. If this is already in the dictionary, we
+ # just keep incrementing by one until we find a free space.
+ with _connection_ref_lock:
+ handle = id(self) % 2147483647
+ while handle in _connection_refs:
+ handle = (handle + 1) % 2147483647
+ _connection_refs[handle] = self
+
+ result = Security.SSLSetConnection(self.context, handle)
+ _assert_no_error(result)
+
+ # If we have a server hostname, we should set that too.
+ if server_hostname:
+ if not isinstance(server_hostname, bytes):
+ server_hostname = server_hostname.encode('utf-8')
+
+ result = Security.SSLSetPeerDomainName(
+ self.context, server_hostname, len(server_hostname)
+ )
+ _assert_no_error(result)
+
+ # Setup the ciphers.
+ self._set_ciphers()
+
+ # Set the minimum and maximum TLS versions.
+ result = Security.SSLSetProtocolVersionMin(self.context, min_version)
+ _assert_no_error(result)
+ result = Security.SSLSetProtocolVersionMax(self.context, max_version)
+ _assert_no_error(result)
+
+ # If there's a trust DB, we need to use it. We do that by telling
+ # SecureTransport to break on server auth. We also do that if we don't
+ # want to validate the certs at all: we just won't actually do any
+ # authing in that case.
+ if not verify or trust_bundle is not None:
+ result = Security.SSLSetSessionOption(
+ self.context,
+ SecurityConst.kSSLSessionOptionBreakOnServerAuth,
+ True
+ )
+ _assert_no_error(result)
+
+ # If there's a client cert, we need to use it.
+ if client_cert:
+ self._keychain, self._keychain_dir = _temporary_keychain()
+ self._client_cert_chain = _load_client_cert_chain(
+ self._keychain, client_cert, client_key
+ )
+ result = Security.SSLSetCertificate(
+ self.context, self._client_cert_chain
+ )
+ _assert_no_error(result)
+
+ while True:
+ with self._raise_on_error():
+ result = Security.SSLHandshake(self.context)
+
+ if result == SecurityConst.errSSLWouldBlock:
+ raise socket.timeout("handshake timed out")
+ elif result == SecurityConst.errSSLServerAuthCompleted:
+ self._custom_validate(verify, trust_bundle)
+ continue
+ else:
+ _assert_no_error(result)
+ break
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, bufsiz):
+ buffer = ctypes.create_string_buffer(bufsiz)
+ bytes_read = self.recv_into(buffer, bufsiz)
+ data = buffer[:bytes_read]
+ return data
+
+ def recv_into(self, buffer, nbytes=None):
+ # Read short on EOF.
+ if self._closed:
+ return 0
+
+ if nbytes is None:
+ nbytes = len(buffer)
+
+ buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLRead(
+ self.context, buffer, nbytes, ctypes.byref(processed_bytes)
+ )
+
+ # There are some result codes that we want to treat as "not always
+ # errors". Specifically, those are errSSLWouldBlock,
+ # errSSLClosedGraceful, and errSSLClosedNoNotify.
+        if result == SecurityConst.errSSLWouldBlock:
+ # If we didn't process any bytes, then this was just a time out.
+ # However, we can get errSSLWouldBlock in situations when we *did*
+ # read some data, and in those cases we should just read "short"
+ # and return.
+ if processed_bytes.value == 0:
+ # Timed out, no data read.
+ raise socket.timeout("recv timed out")
+ elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
+ # The remote peer has closed this connection. We should do so as
+ # well. Note that we don't actually return here because in
+ # principle this could actually be fired along with return data.
+ # It's unlikely though.
+ self.close()
+ else:
+ _assert_no_error(result)
+
+ # Ok, we read and probably succeeded. We should return whatever data
+ # was actually read.
+ return processed_bytes.value
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def send(self, data):
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLWrite(
+ self.context, data, len(data), ctypes.byref(processed_bytes)
+ )
+
+ if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
+ # Timed out
+ raise socket.timeout("send timed out")
+ else:
+ _assert_no_error(result)
+
+ # We sent, and probably succeeded. Tell them how much we sent.
+ return processed_bytes.value
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ with self._raise_on_error():
+ Security.SSLClose(self.context)
+
+ def close(self):
+ # TODO: should I do clean shutdown here? Do I have to?
+ if self._makefile_refs < 1:
+ self._closed = True
+ if self.context:
+ CoreFoundation.CFRelease(self.context)
+ self.context = None
+ if self._client_cert_chain:
+ CoreFoundation.CFRelease(self._client_cert_chain)
+ self._client_cert_chain = None
+ if self._keychain:
+ Security.SecKeychainDelete(self._keychain)
+ CoreFoundation.CFRelease(self._keychain)
+ shutil.rmtree(self._keychain_dir)
+ self._keychain = self._keychain_dir = None
+ return self.socket.close()
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ # Urgh, annoying.
+ #
+ # Here's how we do this:
+ #
+ # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
+ # connection.
+ # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
+ # 3. To get the CN, call SecCertificateCopyCommonName and process that
+ # string so that it's of the appropriate type.
+ # 4. To get the SAN, we need to do something a bit more complex:
+ # a. Call SecCertificateCopyValues to get the data, requesting
+ # kSecOIDSubjectAltName.
+ # b. Mess about with this dictionary to try to get the SANs out.
+ #
+ # This is gross. Really gross. It's going to be a few hundred LoC extra
+ # just to repeat something that SecureTransport can *already do*. So my
+ # operating assumption at this time is that what we want to do is
+ # instead to just flag to urllib3 that it shouldn't do its own hostname
+ # validation when using SecureTransport.
+ if not binary_form:
+ raise ValueError(
+ "SecureTransport only supports dumping binary certs"
+ )
+ trust = Security.SecTrustRef()
+ certdata = None
+ der_bytes = None
+
+ try:
+ # Grab the trust store.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ # Probably we haven't done the handshake yet. No biggie.
+ return None
+
+ cert_count = Security.SecTrustGetCertificateCount(trust)
+ if not cert_count:
+ # Also a case that might happen if we haven't handshaked.
+ # Handshook? Handshaken?
+ return None
+
+ leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
+ assert leaf
+
+ # Ok, now we want the DER bytes.
+ certdata = Security.SecCertificateCopyData(leaf)
+ assert certdata
+
+ data_length = CoreFoundation.CFDataGetLength(certdata)
+ data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
+ der_bytes = ctypes.string_at(data_buffer, data_length)
+ finally:
+ if certdata:
+ CoreFoundation.CFRelease(certdata)
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ return der_bytes
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ def makefile(self, mode="r", buffering=None, *args, **kwargs):
+ # We disable buffering with SecureTransport because it conflicts with
+ # the buffering that ST does internally (see issue #1153 for more).
+ buffering = 0
+ return backport_makefile(self, mode, buffering, *args, **kwargs)
+
+WrappedSocket.makefile = makefile
+
+
+class SecureTransportContext(object):
+ """
+ I am a wrapper class for the SecureTransport library, to translate the
+ interface of the standard library ``SSLContext`` object to calls into
+ SecureTransport.
+ """
+ def __init__(self, protocol):
+ self._min_version, self._max_version = _protocol_to_min_max[protocol]
+ self._options = 0
+ self._verify = False
+ self._trust_bundle = None
+ self._client_cert = None
+ self._client_key = None
+ self._client_key_passphrase = None
+
+ @property
+ def check_hostname(self):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ return True
+
+ @check_hostname.setter
+ def check_hostname(self, value):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ pass
+
+ @property
+ def options(self):
+ # TODO: Well, crap.
+ #
+ # So this is the bit of the code that is the most likely to cause us
+ # trouble. Essentially we need to enumerate all of the SSL options that
+ # users might want to use and try to see if we can sensibly translate
+ # them, or whether we should just ignore them.
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ # TODO: Update in line with above.
+ self._options = value
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._verify = True if value == ssl.CERT_REQUIRED else False
+
+ def set_default_verify_paths(self):
+ # So, this has to do something a bit weird. Specifically, what it does
+ # is nothing.
+ #
+ # This means that, if we had previously had load_verify_locations
+ # called, this does not undo that. We need to do that because it turns
+ # out that the rest of the urllib3 code will attempt to load the
+ # default verify paths if it hasn't been told about any paths, even if
+        # the context itself was configured sometime earlier. We resolve that
+        # by just ignoring it.
+ pass
+
+ def load_default_certs(self):
+ return self.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ # For now, we just require the default cipher string.
+ if ciphers != util.ssl_.DEFAULT_CIPHERS:
+ raise ValueError(
+ "SecureTransport doesn't support custom cipher strings"
+ )
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ # OK, we only really support cadata and cafile.
+ if capath is not None:
+ raise ValueError(
+ "SecureTransport does not support cert directories"
+ )
+
+ self._trust_bundle = cafile or cadata
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._client_cert = certfile
+ self._client_key = keyfile
+ # Note: this must set _client_key_passphrase, which is what
+ # wrap_socket() reads; assigning _client_cert_passphrase here would
+ # silently drop the password.
+ self._client_key_passphrase = password
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ # So, what do we do here? Firstly, we assert some properties. This is a
+ # stripped down shim, so there is some functionality we don't support.
+ # See PEP 543 for the real deal.
+ assert not server_side
+ assert do_handshake_on_connect
+ assert suppress_ragged_eofs
+
+ # Ok, we're good to go. Now we want to create the wrapped socket object
+ # and store it in the appropriate place.
+ wrapped_socket = WrappedSocket(sock)
+
+ # Now we can handshake
+ wrapped_socket.handshake(
+ server_hostname, self._verify, self._trust_bundle,
+ self._min_version, self._max_version, self._client_cert,
+ self._client_key, self._client_key_passphrase
+ )
+ return wrapped_socket
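+
+# Usage sketch: this context is normally activated through the
+# inject_into_urllib3() helper defined earlier in this file, after which
+# PoolManager constructs SecureTransportContext objects itself. The URL
+# below is illustrative.
+#
+#     import urllib3
+#     from urllib3.contrib import securetransport
+#     securetransport.inject_into_urllib3()
+#     http = urllib3.PoolManager()
+#     r = http.request('GET', 'https://example.com/')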
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
new file mode 100644
index 0000000..1cb7928
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
+SOCKS5. To enable its functionality, either install PySocks or install this
+module with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4
+- SOCKS4a
+- SOCKS5
+- Usernames and passwords for the SOCKS proxy
+
+Known Limitations:
+
+- Currently PySocks does not support contacting remote websites via literal
+ IPv6 addresses. Any such connection attempt will fail. You must use a domain
+ name.
+- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
+ such connection attempt will fail.
+"""
+from __future__ import absolute_import
+
+try:
+ import socks
+except ImportError:
+ import warnings
+ from ..exceptions import DependencyWarning
+
+ warnings.warn((
+ 'SOCKS support in urllib3 requires the installation of optional '
+ 'dependencies: specifically, PySocks. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
+ ),
+ DependencyWarning
+ )
+ raise
+
+from socket import error as SocketError, timeout as SocketTimeout
+
+from ..connection import (
+ HTTPConnection, HTTPSConnection
+)
+from ..connectionpool import (
+ HTTPConnectionPool, HTTPSConnectionPool
+)
+from ..exceptions import ConnectTimeoutError, NewConnectionError
+from ..poolmanager import PoolManager
+from ..util.url import parse_url
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+
+class SOCKSConnection(HTTPConnection):
+ """
+ A plain-text HTTP connection that connects via a SOCKS proxy.
+ """
+ def __init__(self, *args, **kwargs):
+ self._socks_options = kwargs.pop('_socks_options')
+ super(SOCKSConnection, self).__init__(*args, **kwargs)
+
+ def _new_conn(self):
+ """
+ Establish a new connection via the SOCKS proxy.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = socks.create_connection(
+ (self.host, self.port),
+ proxy_type=self._socks_options['socks_version'],
+ proxy_addr=self._socks_options['proxy_host'],
+ proxy_port=self._socks_options['proxy_port'],
+ proxy_username=self._socks_options['username'],
+ proxy_password=self._socks_options['password'],
+ proxy_rdns=self._socks_options['rdns'],
+ timeout=self.timeout,
+ **extra_kw
+ )
+
+ except SocketTimeout:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except socks.ProxyError as e:
+ # This is fragile as hell, but it seems to be the only way to raise
+ # useful errors here.
+ if e.socket_err:
+ error = e.socket_err
+ if isinstance(error, SocketTimeout):
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout)
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % error
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % e
+ )
+
+ except SocketError as e: # Defensive: PySocks should catch all these.
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+
+# We don't need to duplicate the Verified/Unverified distinction from
+# urllib3/connection.py here because the HTTPSConnection will already have been
+# correctly set to either the Verified or Unverified form by that module. This
+# means the SOCKSHTTPSConnection will automatically be the correct type.
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
+ pass
+
+
+class SOCKSHTTPConnectionPool(HTTPConnectionPool):
+ ConnectionCls = SOCKSConnection
+
+
+class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
+ ConnectionCls = SOCKSHTTPSConnection
+
+
+class SOCKSProxyManager(PoolManager):
+ """
+ A version of the urllib3 ProxyManager that routes connections via the
+ defined SOCKS proxy.
+ """
+ pool_classes_by_scheme = {
+ 'http': SOCKSHTTPConnectionPool,
+ 'https': SOCKSHTTPSConnectionPool,
+ }
+
+ def __init__(self, proxy_url, username=None, password=None,
+ num_pools=10, headers=None, **connection_pool_kw):
+ parsed = parse_url(proxy_url)
+
+ if parsed.scheme == 'socks5':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = False
+ elif parsed.scheme == 'socks5h':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = True
+ elif parsed.scheme == 'socks4':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = False
+ elif parsed.scheme == 'socks4a':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = True
+ else:
+ raise ValueError(
+ "Unable to determine SOCKS version from %s" % proxy_url
+ )
+
+ self.proxy_url = proxy_url
+
+ socks_options = {
+ 'socks_version': socks_version,
+ 'proxy_host': parsed.host,
+ 'proxy_port': parsed.port,
+ 'username': username,
+ 'password': password,
+ 'rdns': rdns
+ }
+ connection_pool_kw['_socks_options'] = socks_options
+
+ super(SOCKSProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw
+ )
+
+ self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
diff --git a/collectors/python.d.plugin/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
new file mode 100644
index 0000000..a71cabe
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
@@ -0,0 +1,247 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from .packages.six.moves.http_client import (
+ IncompleteRead as httplib_IncompleteRead
+)
+# Base Exceptions
+
+
+class HTTPError(Exception):
+ "Base exception used by this module."
+ pass
+
+
+class HTTPWarning(Warning):
+ "Base warning used by this module."
+ pass
+
+
+class PoolError(HTTPError):
+ "Base exception for errors caused within a pool."
+ def __init__(self, pool, message):
+ self.pool = pool
+ HTTPError.__init__(self, "%s: %s" % (pool, message))
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, None)
+
+
+class RequestError(PoolError):
+ "Base exception for PoolErrors that have associated URLs."
+ def __init__(self, pool, url, message):
+ self.url = url
+ PoolError.__init__(self, pool, message)
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, self.url, None)
+
+
+class SSLError(HTTPError):
+ "Raised when SSL certificate fails in an HTTPS connection."
+ pass
+
+
+class ProxyError(HTTPError):
+ "Raised when the connection to a proxy fails."
+ pass
+
+
+class DecodeError(HTTPError):
+ "Raised when automatic decoding based on Content-Type fails."
+ pass
+
+
+class ProtocolError(HTTPError):
+ "Raised when something unexpected happens mid-request/response."
+ pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
+# Leaf Exceptions
+
+class MaxRetryError(RequestError):
+ """Raised when the maximum number of retries is exceeded.
+
+ :param pool: The connection pool
+ :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+ :param string url: The requested URL
+ :param exceptions.Exception reason: The underlying error
+
+ """
+
+ def __init__(self, pool, url, reason=None):
+ self.reason = reason
+
+ message = "Max retries exceeded with url: %s (Caused by %r)" % (
+ url, reason)
+
+ RequestError.__init__(self, pool, url, message)
+
+
+class HostChangedError(RequestError):
+ "Raised when an existing pool gets a request for a foreign host."
+
+ def __init__(self, pool, url, retries=3):
+ message = "Tried to open a foreign host with url: %s" % url
+ RequestError.__init__(self, pool, url, message)
+ self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+ """ Raised when passing an invalid state to a timeout """
+ pass
+
+
+class TimeoutError(HTTPError):
+ """ Raised when a socket timeout error occurs.
+
+ Catching this error will catch both :exc:`ReadTimeoutErrors
+ <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+ """
+ pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+ "Raised when a socket timeout occurs while receiving data from a server"
+ pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+ "Raised when a socket timeout occurs while connecting to a server"
+ pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+ "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
+ pass
+
+
+class EmptyPoolError(PoolError):
+ "Raised when a pool runs out of connections and no more are allowed."
+ pass
+
+
+class ClosedPoolError(PoolError):
+ "Raised when a request enters a pool after the pool has been closed."
+ pass
+
+
+class LocationValueError(ValueError, HTTPError):
+ "Raised when there is something wrong with a given URL input."
+ pass
+
+
+class LocationParseError(LocationValueError):
+ "Raised when get_host or similar fails to parse the URL input."
+
+ def __init__(self, location):
+ message = "Failed to parse: %s" % location
+ HTTPError.__init__(self, message)
+
+ self.location = location
+
+
+class ResponseError(HTTPError):
+ "Used as a container for an error reason supplied in a MaxRetryError."
+ GENERIC_ERROR = 'too many error responses'
+ SPECIFIC_ERROR = 'too many {status_code} error responses'
+
+
+class SecurityWarning(HTTPWarning):
+ "Warned when perfoming security reducing actions"
+ pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+ "Warned when connecting to a host with a certificate missing a SAN."
+ pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+ "Warned when making an unverified HTTPS request."
+ pass
+
+
+class SystemTimeWarning(SecurityWarning):
+ "Warned when system time is suspected to be wrong"
+ pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+ "Warned when certain SSL configuration is not available on a platform."
+ pass
+
+
+class SNIMissingWarning(HTTPWarning):
+ "Warned when making a HTTPS request without SNI available."
+ pass
+
+
+class DependencyWarning(HTTPWarning):
+ """
+ Warned when an attempt is made to import a module with missing optional
+ dependencies.
+ """
+ pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+ "Response needs to be chunked in order to read it as chunks."
+ pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+ """
+ Body should be httplib.HTTPResponse like (have an fp attribute which
+ returns raw chunks) for read_chunked().
+ """
+ pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+ """
+ Response length doesn't match expected Content-Length
+
+ Subclass of http_client.IncompleteRead to allow int value
+ for `partial` to avoid creating large objects on streamed
+ reads.
+ """
+ def __init__(self, partial, expected):
+ super(IncompleteRead, self).__init__(partial, expected)
+
+ def __repr__(self):
+ return ('IncompleteRead(%i bytes read, '
+ '%i more expected)' % (self.partial, self.expected))
+
+
+class InvalidHeader(HTTPError):
+ "The header provided was somehow invalid."
+ pass
+
+
+class ProxySchemeUnknown(AssertionError, ValueError):
+ "ProxyManager does not support the supplied scheme"
+ # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+ def __init__(self, scheme):
+ message = "Not supported proxy scheme %s" % scheme
+ super(ProxySchemeUnknown, self).__init__(message)
+
+
+class HeaderParsingError(HTTPError):
+ "Raised by assert_header_parsing, but we convert it to a log.warning statement."
+ def __init__(self, defects, unparsed_data):
+ message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
+ super(HeaderParsingError, self).__init__(message)
+
+
+class UnrewindableBodyError(HTTPError):
+ "urllib3 encountered an error when trying to rewind a body"
+ pass
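+
+# Usage sketch (endpoint is hypothetical): these exceptions surface through
+# urllib3's public API; MaxRetryError wraps the underlying failure in .reason.
+#
+#     import urllib3
+#     from urllib3.exceptions import MaxRetryError
+#     http = urllib3.PoolManager()
+#     try:
+#         http.request('GET', 'http://localhost:1/', retries=1)
+#     except MaxRetryError as e:
+#         print('request failed: %r' % e.reason)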
diff --git a/collectors/python.d.plugin/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py
new file mode 100644
index 0000000..de7577b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/fields.py
@@ -0,0 +1,179 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import email.utils
+import mimetypes
+
+from .packages import six
+
+
+def guess_content_type(filename, default='application/octet-stream'):
+ """
+ Guess the "Content-Type" of a file.
+
+ :param filename:
+ The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+ :param default:
+ If no "Content-Type" can be guessed, default to `default`.
+ """
+ if filename:
+ return mimetypes.guess_type(filename)[0] or default
+ return default
+
+
+def format_header_param(name, value):
+ """
+ Helper function to format and quote a single header parameter.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows RFC 2231, as
+ suggested by RFC 2388 Section 4.4.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+ if not any(ch in value for ch in '"\\\r\n'):
+ result = '%s="%s"' % (name, value)
+ try:
+ result.encode('ascii')
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ pass
+ else:
+ return result
+ if not six.PY3 and isinstance(value, six.text_type): # Python 2:
+ value = value.encode('utf-8')
+ value = email.utils.encode_rfc2231(value, 'utf-8')
+ value = '%s*=%s' % (name, value)
+ return value
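+
+# Illustrative behaviour of the helper above: ASCII values stay as plain
+# quoted parameters, while non-ASCII values fall through to the RFC 2231 form.
+#
+#     format_header_param('filename', 'report.txt')
+#     # -> 'filename="report.txt"'
+#     format_header_param('filename', u'r\xe9sum\xe9.txt')
+#     # -> "filename*=utf-8''r%C3%A9sum%C3%A9.txt"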
+
+
+class RequestField(object):
+ """
+ A data container for request body parameters.
+
+ :param name:
+ The name of this request field.
+ :param data:
+ The data/value body.
+ :param filename:
+ An optional filename of the request field.
+ :param headers:
+ An optional dict-like object of headers to initially use for the field.
+ """
+ def __init__(self, name, data, filename=None, headers=None):
+ self._name = name
+ self._filename = filename
+ self.data = data
+ self.headers = {}
+ if headers:
+ self.headers = dict(headers)
+
+ @classmethod
+ def from_tuples(cls, fieldname, value):
+ """
+ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+ Supports constructing :class:`~urllib3.fields.RequestField` from
+ key/value strings and key/filetuple parameters. A filetuple is a
+ (filename, data, MIME type) tuple where the MIME type is optional.
+ For example::
+
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+
+ Field names and filenames must be unicode.
+ """
+ if isinstance(value, tuple):
+ if len(value) == 3:
+ filename, data, content_type = value
+ else:
+ filename, data = value
+ content_type = guess_content_type(filename)
+ else:
+ filename = None
+ content_type = None
+ data = value
+
+ request_param = cls(fieldname, data, filename=filename)
+ request_param.make_multipart(content_type=content_type)
+
+ return request_param
+
+ def _render_part(self, name, value):
+ """
+ Overridable helper function to format a single header parameter.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+ return format_header_param(name, value)
+
+ def _render_parts(self, header_parts):
+ """
+ Helper function to format and quote a single header.
+
+ Useful for single headers that are composed of multiple items. E.g.,
+ 'Content-Disposition' fields.
+
+ :param header_parts:
+ A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+ as `k1="v1"; k2="v2"; ...`.
+ """
+ parts = []
+ iterable = header_parts
+ if isinstance(header_parts, dict):
+ iterable = header_parts.items()
+
+ for name, value in iterable:
+ if value is not None:
+ parts.append(self._render_part(name, value))
+
+ return '; '.join(parts)
+
+ def render_headers(self):
+ """
+ Renders the headers for this request field.
+ """
+ lines = []
+
+ sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
+ for sort_key in sort_keys:
+ if self.headers.get(sort_key, False):
+ lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
+
+ for header_name, header_value in self.headers.items():
+ if header_name not in sort_keys:
+ if header_value:
+ lines.append('%s: %s' % (header_name, header_value))
+
+ lines.append('\r\n')
+ return '\r\n'.join(lines)
+
+ def make_multipart(self, content_disposition=None, content_type=None,
+ content_location=None):
+ """
+ Makes this request field into a multipart request field.
+
+ This method sets the "Content-Disposition", "Content-Type" and
+ "Content-Location" headers on the request field.
+
+ :param content_type:
+ The 'Content-Type' of the request body.
+ :param content_location:
+ The 'Content-Location' of the request body.
+
+ """
+ self.headers['Content-Disposition'] = content_disposition or 'form-data'
+ self.headers['Content-Disposition'] += '; '.join([
+ '', self._render_parts(
+ (('name', self._name), ('filename', self._filename))
+ )
+ ])
+ self.headers['Content-Type'] = content_type
+ self.headers['Content-Location'] = content_location
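+
+# Usage sketch (field and file names are illustrative):
+#
+#     rf = RequestField('upload', b'contents', filename='a.txt')
+#     rf.make_multipart(content_type='text/plain')
+#     rf.render_headers()
+#     # -> 'Content-Disposition: form-data; name="upload"; filename="a.txt"\r\n'
+#     #    'Content-Type: text/plain\r\n'
+#     #    '\r\n'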
diff --git a/collectors/python.d.plugin/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
new file mode 100644
index 0000000..3febc9c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import codecs
+
+from uuid import uuid4
+from io import BytesIO
+
+from .packages import six
+from .packages.six import b
+from .fields import RequestField
+
+writer = codecs.lookup('utf-8')[3]
+
+
+def choose_boundary():
+ """
+ Our embarrassingly-simple replacement for mimetools.choose_boundary.
+ """
+ return uuid4().hex
+
+
+def iter_field_objects(fields):
+ """
+ Iterate over fields.
+
+ Supports list of (k, v) tuples and dicts, and lists of
+ :class:`~urllib3.fields.RequestField`.
+
+ """
+ if isinstance(fields, dict):
+ i = six.iteritems(fields)
+ else:
+ i = iter(fields)
+
+ for field in i:
+ if isinstance(field, RequestField):
+ yield field
+ else:
+ yield RequestField.from_tuples(*field)
+
+
+def iter_fields(fields):
+ """
+ .. deprecated:: 1.6
+
+ Iterate over fields.
+
+ The addition of :class:`~urllib3.fields.RequestField` makes this function
+ obsolete. Instead, use :func:`iter_field_objects`, which returns
+ :class:`~urllib3.fields.RequestField` objects.
+
+ Supports list of (k, v) tuples and dicts.
+ """
+ if isinstance(fields, dict):
+ return ((k, v) for k, v in six.iteritems(fields))
+
+ return ((k, v) for k, v in fields)
+
+
+def encode_multipart_formdata(fields, boundary=None):
+ """
+ Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
+
+ :param fields:
+ Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
+
+ :param boundary:
+ If not specified, a random boundary will be generated using
+ :func:`choose_boundary`.
+ """
+ body = BytesIO()
+ if boundary is None:
+ boundary = choose_boundary()
+
+ for field in iter_field_objects(fields):
+ body.write(b('--%s\r\n' % (boundary)))
+
+ writer(body).write(field.render_headers())
+ data = field.data
+
+ if isinstance(data, int):
+ data = str(data) # Backwards compatibility
+
+ if isinstance(data, six.text_type):
+ writer(body).write(data)
+ else:
+ body.write(data)
+
+ body.write(b'\r\n')
+
+ body.write(b('--%s--\r\n' % (boundary)))
+
+ content_type = str('multipart/form-data; boundary=%s' % boundary)
+
+ return body.getvalue(), content_type
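+
+# Usage sketch (field names are illustrative): plain values and
+# (filename, data[, MIME type]) tuples are both accepted.
+#
+#     body, content_type = encode_multipart_formdata({
+#         'field': 'value',
+#         'upload': ('report.txt', b'file contents', 'text/plain'),
+#     })
+#     # content_type == 'multipart/form-data; boundary=<32 hex digits>'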
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
new file mode 100644
index 0000000..170e974
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import ssl_match_hostname
+
+__all__ = ('ssl_match_hostname', )
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
new file mode 100644
index 0000000..8ab122f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
+"""
+backports.makefile
+~~~~~~~~~~~~~~~~~~
+
+Backports the Python 3 ``socket.makefile`` method for use with anything that
+wants to create a "fake" socket object.
+"""
+import io
+
+from socket import SocketIO
+
+
+def backport_makefile(self, mode="r", buffering=None, encoding=None,
+ errors=None, newline=None):
+ """
+ Backport of ``socket.makefile`` from Python 3.5.
+ """
+ if not set(mode) <= set(["r", "w", "b"]):
+ raise ValueError(
+ "invalid mode %r (only r, w, b allowed)" % (mode,)
+ )
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = SocketIO(self, rawmode)
+ self._makefile_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
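+
+# Usage sketch: a socket-like class that defines fileno() and tracks a
+# _makefile_refs counter (as urllib3's SecureTransport WrappedSocket does)
+# can adopt this backport directly on Python 3:
+#
+#     WrappedSocket.makefile = backport_makefile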
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
new file mode 100644
index 0000000..9f7c0e6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
@@ -0,0 +1,260 @@
+# Backport of the OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and PyPy.
+# Passes Python 2.7's test suite and incorporates all the latest updates.
+# Copyright 2009 Raymond Hettinger, released under the MIT License.
+# http://code.activestate.com/recipes/576693/
+# SPDX-License-Identifier: MIT
+try:
+ from thread import get_ident as _get_ident
+except ImportError:
+ from dummy_thread import get_ident as _get_ident
+
+try:
+ from _abcoll import KeysView, ValuesView, ItemsView
+except ImportError:
+ pass
+
+
+class OrderedDict(dict):
+ 'Dictionary that remembers insertion order'
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as for regular dictionaries.
+
+ # The internal self.__map dictionary maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # Each link is stored as a list of length three: [PREV, NEXT, KEY].
+
+ def __init__(self, *args, **kwds):
+ '''Initialize an ordered dictionary. Signature is the same as for
+ regular dictionaries, but keyword arguments are not recommended
+ because their insertion order is arbitrary.
+
+ '''
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__root = root = [] # sentinel node
+ root[:] = [root, root, None]
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link which goes at the end of the linked
+ # list, and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ root = self.__root
+ last = root[0]
+ last[1] = root[0] = self.__map[key] = [last, root, key]
+ dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which is
+ # then removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link_prev, link_next, key = self.__map.pop(key)
+ link_prev[1] = link_next
+ link_next[0] = link_prev
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ root = self.__root
+ curr = root[1]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[1]
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ root = self.__root
+ curr = root[0]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[0]
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ try:
+ for node in self.__map.itervalues():
+ del node[:]
+ root = self.__root
+ root[:] = [root, root, None]
+ self.__map.clear()
+ except AttributeError:
+ pass
+ dict.clear(self)
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ root = self.__root
+ if last:
+ link = root[0]
+ link_prev = link[0]
+ link_prev[1] = root
+ root[0] = link_prev
+ else:
+ link = root[1]
+ link_next = link[1]
+ root[1] = link_next
+ link_next[0] = root
+ key = link[2]
+ del self.__map[key]
+ value = dict.pop(self, key)
+ return key, value
+
+ # -- the following methods do not depend on the internal structure --
+
+ def keys(self):
+ 'od.keys() -> list of keys in od'
+ return list(self)
+
+ def values(self):
+ 'od.values() -> list of values in od'
+ return [self[key] for key in self]
+
+ def items(self):
+ 'od.items() -> list of (key, value) pairs in od'
+ return [(key, self[key]) for key in self]
+
+ def iterkeys(self):
+ 'od.iterkeys() -> an iterator over the keys in od'
+ return iter(self)
+
+ def itervalues(self):
+ 'od.itervalues() -> an iterator over the values in od'
+ for k in self:
+ yield self[k]
+
+ def iteritems(self):
+ 'od.iteritems() -> an iterator over the (key, value) items in od'
+ for k in self:
+ yield (k, self[k])
+
+ def update(*args, **kwds):
+ '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
+
+ If E is a dict instance, does: for k in E: od[k] = E[k]
+ If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
+ Or if E is an iterable of items, does: for k, v in E: od[k] = v
+ In either case, this is followed by: for k, v in F.items(): od[k] = v
+
+ '''
+ if len(args) > 2:
+ raise TypeError('update() takes at most 2 positional '
+ 'arguments (%d given)' % (len(args),))
+ elif not args:
+ raise TypeError('update() takes at least 1 argument (0 given)')
+ self = args[0]
+ # Make progressively weaker assumptions about "other"
+ other = ()
+ if len(args) == 2:
+ other = args[1]
+ if isinstance(other, dict):
+ for key in other:
+ self[key] = other[key]
+ elif hasattr(other, 'keys'):
+ for key in other.keys():
+ self[key] = other[key]
+ else:
+ for key, value in other:
+ self[key] = value
+ for key, value in kwds.items():
+ self[key] = value
+
+ __update = update # let subclasses override update without breaking __init__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+
+ '''
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ def __repr__(self, _repr_running={}):
+ 'od.__repr__() <==> repr(od)'
+ call_key = id(self), _get_ident()
+ if call_key in _repr_running:
+ return '...'
+ _repr_running[call_key] = 1
+ try:
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+ finally:
+ del _repr_running[call_key]
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ inst_dict = vars(self).copy()
+ for k in vars(OrderedDict()):
+ inst_dict.pop(k, None)
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def copy(self):
+ 'od.copy() -> a shallow copy of od'
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+ and values equal to v (which defaults to None).
+
+ '''
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
+ while comparison to a regular mapping is order-insensitive.
+
+ '''
+ if isinstance(other, OrderedDict):
+ return len(self)==len(other) and self.items() == other.items()
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ # -- the following methods are only used in Python 2.7 --
+
+ def viewkeys(self):
+ "od.viewkeys() -> a set-like object providing a view on od's keys"
+ return KeysView(self)
+
+ def viewvalues(self):
+ "od.viewvalues() -> an object providing a view on od's values"
+ return ValuesView(self)
+
+ def viewitems(self):
+ "od.viewitems() -> a set-like object providing a view on od's items"
+ return ItemsView(self)
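+
+# Illustrative behaviour (Python 2 semantics, where keys() returns a list):
+#
+#     d = OrderedDict([('a', 1), ('b', 2)])
+#     d['c'] = 3
+#     assert d.keys() == ['a', 'b', 'c']
+#     assert d.popitem() == ('c', 3)   # LIFO by default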
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
new file mode 100644
index 0000000..31df501
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
@@ -0,0 +1,852 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# SPDX-License-Identifier: MIT
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP 302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true, if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP 451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required, if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
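+
+# Usage sketch: once this module is fully imported (the meta path importer is
+# registered at the bottom of the file), version-neutral names resolve lazily.
+# The spelling below assumes the canonical top-level ``six``; this vendored
+# copy lives under ``urllib3.packages.six`` instead.
+#
+#     from six.moves import range, queue            # xrange/Queue on Python 2
+#     from six.moves.http_client import HTTPConnection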
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ if from_value is None:
+ raise value
+ raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ raise value from from_value
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
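+
+# A minimal usage sketch (editor's addition): the decorator form of the
+# same idea, reusing the hypothetical Meta from the sketch above:
+#
+#     @add_metaclass(Meta)
+#     class MyClass(object):
+#         pass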
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
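+
+# A minimal usage sketch (editor's addition): define __str__ returning text
+# and let the decorator supply __unicode__/__str__ on Python 2:
+#
+#     @python_2_unicode_compatible
+#     class Greeting(object):
+#         def __str__(self):
+#             return u'hello'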
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
new file mode 100644
index 0000000..2aeeeff
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: MIT
+import sys
+
+try:
+ # Our match_hostname function is the same as 3.5's, so we only want to
+ # import the match_hostname function if it's at least that good.
+ if sys.version_info < (3, 5):
+ raise ImportError("Fallback to vendored code")
+
+ from ssl import CertificateError, match_hostname
+except ImportError:
+ try:
+ # Backport of the function from a pypi module
+ from backports.ssl_match_hostname import CertificateError, match_hostname
+ except ImportError:
+ # Our vendored copy
+ from ._implementation import CertificateError, match_hostname
+
+# Not needed, but documenting what we provide.
+__all__ = ('CertificateError', 'match_hostname')
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
new file mode 100644
index 0000000..647e081
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
@@ -0,0 +1,156 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# SPDX-License-Identifier: Python-2.0
+
+import re
+import sys
+
+# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
+# system, use it to handle IPAddress ServerAltnames (this was added in
+# python-3.5) otherwise only do DNS matching. This allows
+# backports.ssl_match_hostname to continue to be used all the way back to
+# python-2.4.
+try:
+ import ipaddress
+except ImportError:
+ ipaddress = None
+
+__version__ = '3.5.0.1'
+
+
+class CertificateError(ValueError):
+ pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r'.')
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count('*')
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn))
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+ return pat.match(hostname)
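+
+# A behavioural sketch (editor's addition): per RFC 6125, a wildcard covers
+# exactly one label:
+#
+#     >>> bool(_dnsname_match('*.example.com', 'www.example.com'))
+#     True
+#     >>> bool(_dnsname_match('*.example.com', 'a.b.example.com'))
+#     False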
+
+
+def _to_unicode(obj):
+ if isinstance(obj, str) and sys.version_info < (3,):
+ obj = unicode(obj, encoding='ascii', errors='strict')
+ return obj
+
+def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ # Divergence from upstream: ipaddress can't handle byte str
+ ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+ return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED")
+ try:
+ # Divergence from upstream: ipaddress can't handle byte str
+ host_ip = ipaddress.ip_address(_to_unicode(hostname))
+ except ValueError:
+ # Not an IP address (common case)
+ host_ip = None
+ except UnicodeError:
+ # Divergence from upstream: Have to deal with ipaddress not taking
+ # byte strings. addresses should be all ascii, so we consider it not
+ # an ipaddress in this case
+ host_ip = None
+ except AttributeError:
+ # Divergence from upstream: Make ipaddress library optional
+ if ipaddress is None:
+ host_ip = None
+ else:
+ raise
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == 'IP Address':
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r "
+ "doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r "
+ "doesn't match %r"
+ % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or "
+ "subjectAltName fields were found")
diff --git a/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
new file mode 100644
index 0000000..adea9bc
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
@@ -0,0 +1,441 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import collections
+import functools
+import logging
+
+from ._collections import RecentlyUsedContainer
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from .connectionpool import port_by_scheme
+from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
+from .packages.six.moves.urllib.parse import urljoin
+from .request import RequestMethods
+from .util.url import parse_url
+from .util.retry import Retry
+
+
+__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
+
+
+log = logging.getLogger(__name__)
+
+SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
+ 'ssl_version', 'ca_cert_dir', 'ssl_context')
+
+# All known keyword arguments that could be provided to the pool manager, its
+# pools, or the underlying connections. This is used to construct a pool key.
+_key_fields = (
+ 'key_scheme', # str
+ 'key_host', # str
+ 'key_port', # int
+ 'key_timeout', # int or float or Timeout
+ 'key_retries', # int or Retry
+ 'key_strict', # bool
+ 'key_block', # bool
+ 'key_source_address', # str
+ 'key_key_file', # str
+ 'key_cert_file', # str
+ 'key_cert_reqs', # str
+ 'key_ca_certs', # str
+ 'key_ssl_version', # str
+ 'key_ca_cert_dir', # str
+ 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
+ 'key_maxsize', # int
+ 'key_headers', # dict
+ 'key__proxy', # parsed proxy url
+ 'key__proxy_headers', # dict
+ 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
+ 'key__socks_options', # dict
+ 'key_assert_hostname', # bool or string
+ 'key_assert_fingerprint', # str
+)
+
+#: The namedtuple class used to construct keys for the connection pool.
+#: All custom key schemes should include the fields in this key at a minimum.
+PoolKey = collections.namedtuple('PoolKey', _key_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+ """
+ Create a pool key out of a request context dictionary.
+
+ According to RFC 3986, both the scheme and host are case-insensitive.
+ Therefore, this function normalizes both before constructing the pool
+ key for an HTTPS request. If you wish to change this behaviour, provide
+ alternate callables to ``key_fn_by_scheme``.
+
+ :param key_class:
+ The class to use when constructing the key. This should be a namedtuple
+ with the ``scheme`` and ``host`` keys at a minimum.
+ :type key_class: namedtuple
+ :param request_context:
+ A dictionary-like object that contains the context for a request.
+ :type request_context: dict
+
+ :return: A namedtuple that can be used as a connection pool key.
+ :rtype: PoolKey
+ """
+ # Since we mutate the dictionary, make a copy first
+ context = request_context.copy()
+ context['scheme'] = context['scheme'].lower()
+ context['host'] = context['host'].lower()
+
+ # These are both dictionaries and need to be transformed into frozensets
+ for key in ('headers', '_proxy_headers', '_socks_options'):
+ if key in context and context[key] is not None:
+ context[key] = frozenset(context[key].items())
+
+ # The socket_options key may be a list and needs to be transformed into a
+ # tuple.
+ socket_opts = context.get('socket_options')
+ if socket_opts is not None:
+ context['socket_options'] = tuple(socket_opts)
+
+ # Map the kwargs to the names in the namedtuple - this is necessary since
+ # namedtuples can't have fields starting with '_'.
+ for key in list(context.keys()):
+ context['key_' + key] = context.pop(key)
+
+ # Default to ``None`` for keys missing from the context
+ for field in key_class._fields:
+ if field not in context:
+ context[field] = None
+
+ return key_class(**context)
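+
+# A behavioural sketch (editor's addition): contexts differing only in
+# scheme/host case normalize to the same PoolKey, so they share a pool:
+#
+#     ctx = {'scheme': 'HTTPS', 'host': 'Example.COM', 'port': 443}
+#     key = _default_key_normalizer(PoolKey, ctx)
+#     assert key == _default_key_normalizer(
+#         PoolKey, {'scheme': 'https', 'host': 'example.com', 'port': 443})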
+
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+ 'http': functools.partial(_default_key_normalizer, PoolKey),
+ 'https': functools.partial(_default_key_normalizer, PoolKey),
+}
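+
+# A customization sketch (editor's addition, my_key_fn is hypothetical):
+# each PoolManager copies this mapping, so a custom key function can be
+# installed per instance without affecting the module-level default:
+#
+#     manager = PoolManager()
+#     manager.key_fn_by_scheme['https'] = my_key_fn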
+
+pool_classes_by_scheme = {
+ 'http': HTTPConnectionPool,
+ 'https': HTTPSConnectionPool,
+}
+
+
+class PoolManager(RequestMethods):
+ """
+ Allows for arbitrary requests while transparently keeping track of
+ necessary connection pools for you.
+
+ :param num_pools:
+ Number of connection pools to cache before discarding the least
+ recently used pool.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param \\**connection_pool_kw:
+ Additional parameters are used to create fresh
+ :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+ Example::
+
+ >>> manager = PoolManager(num_pools=2)
+ >>> r = manager.request('GET', 'http://google.com/')
+ >>> r = manager.request('GET', 'http://google.com/mail')
+ >>> r = manager.request('GET', 'http://yahoo.com/')
+ >>> len(manager.pools)
+ 2
+
+ """
+
+ proxy = None
+
+ def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+ RequestMethods.__init__(self, headers)
+ self.connection_pool_kw = connection_pool_kw
+ self.pools = RecentlyUsedContainer(num_pools,
+ dispose_func=lambda p: p.close())
+
+ # Locally set the pool classes and keys so other PoolManagers can
+ # override them.
+ self.pool_classes_by_scheme = pool_classes_by_scheme
+ self.key_fn_by_scheme = key_fn_by_scheme.copy()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.clear()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def _new_pool(self, scheme, host, port, request_context=None):
+ """
+ Create a new :class:`ConnectionPool` based on host, port, scheme, and
+ any additional pool keyword arguments.
+
+ If ``request_context`` is provided, it is passed as keyword arguments
+ to the pool class used. This method is used to actually create the
+ connection pools handed out by :meth:`connection_from_url` and
+ companion methods. It is intended to be overridden for customization.
+ """
+ pool_cls = self.pool_classes_by_scheme[scheme]
+ if request_context is None:
+ request_context = self.connection_pool_kw.copy()
+
+ # Although the context has everything necessary to create the pool,
+ # this function has historically only used the scheme, host, and port
+ # in the positional args. When an API change is acceptable these can
+ # be removed.
+ for key in ('scheme', 'host', 'port'):
+ request_context.pop(key, None)
+
+ if scheme == 'http':
+ for kw in SSL_KEYWORDS:
+ request_context.pop(kw, None)
+
+ return pool_cls(host, port, **request_context)
+
+ def clear(self):
+ """
+ Empty our store of pools and direct them all to close.
+
+ This will not affect in-flight connections, but they will not be
+ re-used after completion.
+ """
+ self.pools.clear()
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ """
+ Get a :class:`ConnectionPool` based on the host, port, and scheme.
+
+ If ``port`` isn't given, it will be derived from the ``scheme`` using
+ ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
+ provided, it is merged with the instance's ``connection_pool_kw``
+ variable and used to create the new connection pool, if one is
+ needed.
+ """
+
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ request_context = self._merge_pool_kwargs(pool_kwargs)
+ request_context['scheme'] = scheme or 'http'
+ if not port:
+ port = port_by_scheme.get(request_context['scheme'].lower(), 80)
+ request_context['port'] = port
+ request_context['host'] = host
+
+ return self.connection_from_context(request_context)
+
+ def connection_from_context(self, request_context):
+ """
+ Get a :class:`ConnectionPool` based on the request context.
+
+ ``request_context`` must at least contain the ``scheme`` key and its
+ value must be a key in the ``key_fn_by_scheme`` instance variable.
+ """
+ scheme = request_context['scheme'].lower()
+ pool_key_constructor = self.key_fn_by_scheme[scheme]
+ pool_key = pool_key_constructor(request_context)
+
+ return self.connection_from_pool_key(pool_key, request_context=request_context)
+
+ def connection_from_pool_key(self, pool_key, request_context=None):
+ """
+ Get a :class:`ConnectionPool` based on the provided pool key.
+
+ ``pool_key`` should be a namedtuple that only contains immutable
+ objects. At a minimum it must have the ``scheme``, ``host``, and
+ ``port`` fields.
+ """
+ with self.pools.lock:
+ # If the scheme, host, or port doesn't match existing open
+ # connections, open a new ConnectionPool.
+ pool = self.pools.get(pool_key)
+ if pool:
+ return pool
+
+ # Make a fresh ConnectionPool of the desired type
+ scheme = request_context['scheme']
+ host = request_context['host']
+ port = request_context['port']
+ pool = self._new_pool(scheme, host, port, request_context=request_context)
+ self.pools[pool_key] = pool
+
+ return pool
+
+ def connection_from_url(self, url, pool_kwargs=None):
+ """
+ Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+ If ``pool_kwargs`` is not provided and a new pool needs to be
+ constructed, ``self.connection_pool_kw`` is used to initialize
+ the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+ is provided, it is used instead. Note that if a new pool does not
+ need to be created for the request, the provided ``pool_kwargs`` are
+ not used.
+ """
+ u = parse_url(url)
+ return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
+ pool_kwargs=pool_kwargs)
+
+ def _merge_pool_kwargs(self, override):
+ """
+ Merge a dictionary of override values for self.connection_pool_kw.
+
+ This does not modify self.connection_pool_kw and returns a new dict.
+ Any keys in the override dictionary with a value of ``None`` are
+ removed from the merged dictionary.
+ """
+ base_pool_kwargs = self.connection_pool_kw.copy()
+ if override:
+ for key, value in override.items():
+ if value is None:
+ try:
+ del base_pool_kwargs[key]
+ except KeyError:
+ pass
+ else:
+ base_pool_kwargs[key] = value
+ return base_pool_kwargs
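+
+ # A behavioural sketch (editor's addition): with connection_pool_kw set
+ # to {'retries': 3, 'timeout': 5}, the call
+ # _merge_pool_kwargs({'timeout': None, 'block': True}) returns
+ # {'retries': 3, 'block': True} - None values remove keys, other values
+ # override them.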
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ """
+ Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
+ with custom cross-host redirect logic and only sends the request-uri
+ portion of the ``url``.
+
+ The given ``url`` parameter must be absolute, such that an appropriate
+ :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+ """
+ u = parse_url(url)
+ conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+ kw['assert_same_host'] = False
+ kw['redirect'] = False
+ if 'headers' not in kw:
+ kw['headers'] = self.headers
+
+ if self.proxy is not None and u.scheme == "http":
+ response = conn.urlopen(method, url, **kw)
+ else:
+ response = conn.urlopen(method, u.request_uri, **kw)
+
+ redirect_location = redirect and response.get_redirect_location()
+ if not redirect_location:
+ return response
+
+ # Support relative URLs for redirecting.
+ redirect_location = urljoin(url, redirect_location)
+
+ # RFC 7231, Section 6.4.4
+ if response.status == 303:
+ method = 'GET'
+
+ retries = kw.get('retries')
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect)
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=conn)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise
+ return response
+
+ kw['retries'] = retries
+ kw['redirect'] = redirect
+
+ log.info("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+ """
+ Behaves just like :class:`PoolManager`, but sends all requests through
+ the defined proxy, using the CONNECT method for HTTPS URLs.
+
+ :param proxy_url:
+ The URL of the proxy to be used.
+
+ :param proxy_headers:
+ A dictionary containing headers that will be sent to the proxy. For
+ HTTP they are sent with each request, while in the HTTPS/CONNECT case
+ they are sent only once. Can be used for proxy authentication.
+
+ Example:
+ >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+ >>> r1 = proxy.request('GET', 'http://google.com/')
+ >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+ >>> len(proxy.pools)
+ 1
+ >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+ >>> r4 = proxy.request('GET', 'https://twitter.com/')
+ >>> len(proxy.pools)
+ 3
+
+ """
+
+ def __init__(self, proxy_url, num_pools=10, headers=None,
+ proxy_headers=None, **connection_pool_kw):
+
+ if isinstance(proxy_url, HTTPConnectionPool):
+ proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
+ proxy_url.port)
+ proxy = parse_url(proxy_url)
+ if not proxy.port:
+ port = port_by_scheme.get(proxy.scheme, 80)
+ proxy = proxy._replace(port=port)
+
+ if proxy.scheme not in ("http", "https"):
+ raise ProxySchemeUnknown(proxy.scheme)
+
+ self.proxy = proxy
+ self.proxy_headers = proxy_headers or {}
+
+ connection_pool_kw['_proxy'] = self.proxy
+ connection_pool_kw['_proxy_headers'] = self.proxy_headers
+
+ super(ProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw)
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ if scheme == "https":
+ return super(ProxyManager, self).connection_from_host(
+ host, port, scheme, pool_kwargs=pool_kwargs)
+
+ return super(ProxyManager, self).connection_from_host(
+ self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
+
+ def _set_proxy_headers(self, url, headers=None):
+ """
+ Sets headers needed by proxies: specifically, the Accept and Host
+ headers. Only sets headers not provided by the user.
+ """
+ headers_ = {'Accept': '*/*'}
+
+ netloc = parse_url(url).netloc
+ if netloc:
+ headers_['Host'] = netloc
+
+ if headers:
+ headers_.update(headers)
+ return headers_
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
+ u = parse_url(url)
+
+ if u.scheme == "http":
+ # For proxied HTTPS requests, httplib sets the necessary headers
+ # on the CONNECT to the proxy. For HTTP, we'll definitely
+ # need to set 'Host' at the very least.
+ headers = kw.get('headers', self.headers)
+ kw['headers'] = self._set_proxy_headers(url, headers)
+
+ return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+ return ProxyManager(proxy_url=url, **kw)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py
new file mode 100644
index 0000000..f783319
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/request.py
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+
+__all__ = ['RequestMethods']
+
+
+class RequestMethods(object):
+ """
+ Convenience mixin for classes who implement a :meth:`urlopen` method, such
+ as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
+ :class:`~urllib3.poolmanager.PoolManager`.
+
+ Provides behavior for making common types of HTTP request methods and
+ decides which type of request field encoding to use.
+
+ Specifically,
+
+ :meth:`.request_encode_url` is for sending requests whose fields are
+ encoded in the URL (such as GET, HEAD, DELETE).
+
+ :meth:`.request_encode_body` is for sending requests whose fields are
+ encoded in the *body* of the request using multipart or www-form-urlencoded
+ (such as for POST, PUT, PATCH).
+
+ :meth:`.request` is for making any kind of request, it will look up the
+ appropriate encoding format and use one of the above two methods to make
+ the request.
+
+ Initializer parameters:
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+ """
+
+ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
+
+ def __init__(self, headers=None):
+ self.headers = headers or {}
+
+ def urlopen(self, method, url, body=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **kw): # Abstract
+ raise NotImplementedError("Classes extending RequestMethods must implement "
+ "their own ``urlopen`` method.")
+
+ def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the appropriate encoding of
+ ``fields`` based on the ``method`` used.
+
+ This is a convenience method that requires the least amount of manual
+ effort. It can be used in most situations, while still having the
+ option to drop down to more specific methods when necessary, such as
+ :meth:`request_encode_url`, :meth:`request_encode_body`,
+ or even the lowest level :meth:`urlopen`.
+ """
+ method = method.upper()
+
+ if method in self._encode_url_methods:
+ return self.request_encode_url(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
+ else:
+ return self.request_encode_body(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
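+
+ # A minimal usage sketch (editor's addition; assumes a PoolManager from
+ # urllib3.poolmanager and a placeholder host): GET fields are encoded
+ # into the query string, POST fields into the request body:
+ #
+ #     from urllib3 import PoolManager
+ #     http = PoolManager()
+ #     http.request('GET', 'http://example.com/', fields={'q': 'netdata'})
+ #     http.request('POST', 'http://example.com/', fields={'q': 'netdata'})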
+
+ def request_encode_url(self, method, url, fields=None, headers=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the url. This is useful for request methods like GET, HEAD, DELETE, etc.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': headers}
+ extra_kw.update(urlopen_kw)
+
+ if fields:
+ url += '?' + urlencode(fields)
+
+ return self.urlopen(method, url, **extra_kw)
+
+ def request_encode_body(self, method, url, fields=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the body. This is useful for request methods like POST, PUT, PATCH, etc.
+
+ When ``encode_multipart=True`` (default), then
+ :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
+ the payload with the appropriate content type. Otherwise
+ :meth:`urllib.urlencode` is used with the
+ 'application/x-www-form-urlencoded' content type.
+
+ Multipart encoding must be used when posting files, and it's reasonably
+ safe to use it at other times too. However, it may break request
+ signing, such as with OAuth.
+
+ Supports an optional ``fields`` parameter of key/value strings AND
+ key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+ the MIME type is optional. For example::
+
+ fields = {
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(),
+ 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+ }
+
+ When uploading a file, providing a filename (the first parameter of the
+ tuple) is optional but recommended to best mimic the behavior of browsers.
+
+ Note that if ``headers`` are supplied, the 'Content-Type' header will
+ be overwritten because it depends on the dynamic random boundary string
+ which is used to compose the body of the request. The random boundary
+ string can be explicitly set with the ``multipart_boundary`` parameter.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': {}}
+
+ if fields:
+ if 'body' in urlopen_kw:
+ raise TypeError(
+ "request got values for both 'fields' and 'body', can only specify one.")
+
+ if encode_multipart:
+ body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
+ else:
+ body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
+
+ extra_kw['body'] = body
+ extra_kw['headers'] = {'Content-Type': content_type}
+
+ extra_kw['headers'].update(headers)
+ extra_kw.update(urlopen_kw)
+
+ return self.urlopen(method, url, **extra_kw)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py
new file mode 100644
index 0000000..cf14a30
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/response.py
@@ -0,0 +1,623 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from contextlib import contextmanager
+import zlib
+import io
+import logging
+from socket import timeout as SocketTimeout
+from socket import error as SocketError
+
+from ._collections import HTTPHeaderDict
+from .exceptions import (
+ BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
+ ResponseNotChunked, IncompleteRead, InvalidHeader
+)
+from .packages.six import string_types as basestring, binary_type, PY3
+from .packages.six.moves import http_client as httplib
+from .connection import HTTPException, BaseSSLError
+from .util.response import is_fp_closed, is_response_to_head
+
+log = logging.getLogger(__name__)
+
+
+class DeflateDecoder(object):
+
+ def __init__(self):
+ self._first_try = True
+ self._data = binary_type()
+ self._obj = zlib.decompressobj()
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+
+ if not self._first_try:
+ return self._obj.decompress(data)
+
+ self._data += data
+ try:
+ decompressed = self._obj.decompress(data)
+ if decompressed:
+ self._first_try = False
+ self._data = None
+ return decompressed
+ except zlib.error:
+ self._first_try = False
+ self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
+ try:
+ return self.decompress(self._data)
+ finally:
+ self._data = None
+
+
+class GzipDecoder(object):
+
+ def __init__(self):
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+ return self._obj.decompress(data)
+
+
+def _get_decoder(mode):
+ if mode == 'gzip':
+ return GzipDecoder()
+
+ return DeflateDecoder()
+
+
+class HTTPResponse(io.IOBase):
+ """
+ HTTP Response container.
+
+ Backwards-compatible with httplib's HTTPResponse, but the response ``body`` is
+ loaded and decoded on-demand when the ``data`` property is accessed. This
+ class is also compatible with the Python standard library's :mod:`io`
+ module, and can hence be treated as a readable object in the context of that
+ framework.
+
+ Extra parameters for behaviour not present in httplib.HTTPResponse:
+
+ :param preload_content:
+ If True, the response's body will be preloaded during construction.
+
+ :param decode_content:
+ If True, attempt to decode the body based on the content-encoding
+ header (like 'gzip' and 'deflate'). If False, decoding is skipped and
+ the raw data is returned instead.
+
+ :param original_response:
+ When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
+ object, it's convenient to include the original for debug purposes. It's
+ otherwise unused.
+
+ :param retries:
+ The retries contains the last :class:`~urllib3.util.retry.Retry` that
+ was used during the request.
+
+ :param enforce_content_length:
+ Enforce content length checking. The body returned by the server must
+ match the value of the Content-Length header, if present; otherwise an
+ error is raised.
+ """
+
+ CONTENT_DECODERS = ['gzip', 'deflate']
+ REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+ def __init__(self, body='', headers=None, status=0, version=0, reason=None,
+ strict=0, preload_content=True, decode_content=True,
+ original_response=None, pool=None, connection=None,
+ retries=None, enforce_content_length=False, request_method=None):
+
+ if isinstance(headers, HTTPHeaderDict):
+ self.headers = headers
+ else:
+ self.headers = HTTPHeaderDict(headers)
+ self.status = status
+ self.version = version
+ self.reason = reason
+ self.strict = strict
+ self.decode_content = decode_content
+ self.retries = retries
+ self.enforce_content_length = enforce_content_length
+
+ self._decoder = None
+ self._body = None
+ self._fp = None
+ self._original_response = original_response
+ self._fp_bytes_read = 0
+
+ if body and isinstance(body, (basestring, binary_type)):
+ self._body = body
+
+ self._pool = pool
+ self._connection = connection
+
+ if hasattr(body, 'read'):
+ self._fp = body
+
+ # Are we using the chunked-style of transfer encoding?
+ self.chunked = False
+ self.chunk_left = None
+ tr_enc = self.headers.get('transfer-encoding', '').lower()
+ # Don't incur the penalty of creating a list and then discarding it
+ encodings = (enc.strip() for enc in tr_enc.split(","))
+ if "chunked" in encodings:
+ self.chunked = True
+
+ # Determine length of response
+ self.length_remaining = self._init_length(request_method)
+
+ # If requested, preload the body.
+ if preload_content and not self._body:
+ self._body = self.read(decode_content=decode_content)
+
+ def get_redirect_location(self):
+ """
+ Should we redirect and where to?
+
+ :returns: Truthy redirect location string if we got a redirect status
+ code and valid location. ``None`` if redirect status and no
+ location. ``False`` if not a redirect status code.
+ """
+ if self.status in self.REDIRECT_STATUSES:
+ return self.headers.get('location')
+
+ return False
+
+ def release_conn(self):
+ if not self._pool or not self._connection:
+ return
+
+ self._pool._put_conn(self._connection)
+ self._connection = None
+
+ @property
+ def data(self):
+ # For backwards-compat with urllib3 0.4 and earlier.
+ if self._body:
+ return self._body
+
+ if self._fp:
+ return self.read(cache_content=True)
+
+ @property
+ def connection(self):
+ return self._connection
+
+ def tell(self):
+ """
+ Obtain the number of bytes pulled over the wire so far. May differ from
+ the amount of content returned by :meth:`HTTPResponse.read` if bytes
+ are encoded on the wire (e.g., compressed).
+ """
+ return self._fp_bytes_read
+
+ def _init_length(self, request_method):
+ """
+ Set initial length value for Response content if available.
+ """
+ length = self.headers.get('content-length')
+
+ if length is not None and self.chunked:
+ # This Response will fail with an IncompleteRead if it can't be
+ # received as chunked. This method falls back to attempt reading
+ # the response before raising an exception.
+ log.warning("Received response with both Content-Length and "
+ "Transfer-Encoding set. This is expressly forbidden "
+ "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
+ "attempting to process response as Transfer-Encoding: "
+ "chunked.")
+ return None
+
+ elif length is not None:
+ try:
+ # RFC 7230 section 3.3.2 specifies multiple content lengths can
+ # be sent in a single Content-Length header
+ # (e.g. Content-Length: 42, 42). This line ensures the values
+ # are all valid ints and that as long as the `set` length is 1,
+ # all values are the same. Otherwise, the header is invalid.
+ lengths = set([int(val) for val in length.split(',')])
+ if len(lengths) > 1:
+ raise InvalidHeader("Content-Length contained multiple "
+ "unmatching values (%s)" % length)
+ length = lengths.pop()
+ except ValueError:
+ length = None
+ else:
+ if length < 0:
+ length = None
+
+ # Convert status to int for comparison
+ # In some cases, httplib returns a status of "_UNKNOWN"
+ try:
+ status = int(self.status)
+ except ValueError:
+ status = 0
+
+ # Check for responses that shouldn't include a body
+ if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
+ length = 0
+
+ return length
+
+ def _init_decoder(self):
+ """
+ Set-up the _decoder attribute if necessary.
+ """
+ # Note: content-encoding value should be case-insensitive, per RFC 7230
+ # Section 3.2
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
+ self._decoder = _get_decoder(content_encoding)
+
+ def _decode(self, data, decode_content, flush_decoder):
+ """
+ Decode the data passed in and potentially flush the decoder.
+ """
+ try:
+ if decode_content and self._decoder:
+ data = self._decoder.decompress(data)
+ except (IOError, zlib.error) as e:
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ raise DecodeError(
+ "Received response with content-encoding: %s, but "
+ "failed to decode it." % content_encoding, e)
+
+ if flush_decoder and decode_content:
+ data += self._flush_decoder()
+
+ return data
+
+ def _flush_decoder(self):
+ """
+ Flushes the decoder. Should only be called if the decoder is actually
+ being used.
+ """
+ if self._decoder:
+ buf = self._decoder.decompress(b'')
+ return buf + self._decoder.flush()
+
+ return b''
+
+ @contextmanager
+ def _error_catcher(self):
+ """
+ Catch low-level python exceptions and re-raise them as urllib3
+ variants instead, so that low-level exceptions are not leaked in the
+ high-level api.
+
+ On exit, release the connection back to the pool.
+ """
+ clean_exit = False
+
+ try:
+ try:
+ yield
+
+ except SocketTimeout:
+ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+ # there is yet no clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except BaseSSLError as e:
+ # FIXME: Is there a better way to differentiate between SSLErrors?
+ if 'read operation timed out' not in str(e): # Defensive:
+ # This shouldn't happen but just in case we're missing an edge
+ # case, let's avoid swallowing SSL errors.
+ raise
+
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except (HTTPException, SocketError) as e:
+ # This includes IncompleteRead.
+ raise ProtocolError('Connection broken: %r' % e, e)
+
+ # If no exception is thrown, we should avoid cleaning up
+ # unnecessarily.
+ clean_exit = True
+ finally:
+ # If we didn't terminate cleanly, we need to throw away our
+ # connection.
+ if not clean_exit:
+ # The response may not be closed but we're not going to use it
+ # anymore so close it now to ensure that the connection is
+ # released back to the pool.
+ if self._original_response:
+ self._original_response.close()
+
+ # Closing the response may not actually be sufficient to close
+ # everything, so if we have a hold of the connection close that
+ # too.
+ if self._connection:
+ self._connection.close()
+
+ # If we hold the original response but it's closed now, we should
+ # return the connection back to the pool.
+ if self._original_response and self._original_response.isclosed():
+ self.release_conn()
+
+ def read(self, amt=None, decode_content=None, cache_content=False):
+ """
+ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
+ parameters: ``decode_content`` and ``cache_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param cache_content:
+ If True, will save the returned data such that the same result is
+ returned regardless of the state of the underlying file object. This
+ is useful if you want the ``.data`` property to continue working
+ after having called ``.read()`` on the file object. (Overridden if ``amt`` is
+ set.)
+ """
+ self._init_decoder()
+ if decode_content is None:
+ decode_content = self.decode_content
+
+ if self._fp is None:
+ return
+
+ flush_decoder = False
+ data = None
+
+ with self._error_catcher():
+ if amt is None:
+ # cStringIO doesn't like amt=None
+ data = self._fp.read()
+ flush_decoder = True
+ else:
+ cache_content = False
+ data = self._fp.read(amt)
+ if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
+ # Close the connection when no data is returned
+ #
+ # This is redundant to what httplib/http.client _should_
+ # already do. However, versions of python released before
+ # December 15, 2012 (http://bugs.python.org/issue16298) do
+ # not properly close the connection in all cases. There is
+ # no harm in redundantly calling close.
+ self._fp.close()
+ flush_decoder = True
+ if self.enforce_content_length and self.length_remaining not in (0, None):
+ # This is an edge case that httplib failed to cover due
+ # to concerns of backward compatibility. We're
+ # addressing it here to make sure IncompleteRead is
+ # raised during streaming, so all calls with incorrect
+ # Content-Length are caught.
+ raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
+
+ if data:
+ self._fp_bytes_read += len(data)
+ if self.length_remaining is not None:
+ self.length_remaining -= len(data)
+
+ data = self._decode(data, decode_content, flush_decoder)
+
+ if cache_content:
+ self._body = data
+
+ return data
+
+ def stream(self, amt=2**16, decode_content=None):
+ """
+ A generator wrapper for the read() method. A call will block until
+ ``amt`` bytes have been read from the connection or until the
+ connection is closed.
+
+ :param amt:
+ How much of the content to read. The generator will return up to this
+ much data per iteration, but may return less. This is particularly
+ likely when using compressed data. However, the empty string will
+ never be returned.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ if self.chunked and self.supports_chunked_reads():
+ for line in self.read_chunked(amt, decode_content=decode_content):
+ yield line
+ else:
+ while not is_fp_closed(self._fp):
+ data = self.read(amt=amt, decode_content=decode_content)
+
+ if data:
+ yield data
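+
+ # A minimal usage sketch (editor's addition, handle() is hypothetical):
+ # iterate a response without preloading its body, then release the
+ # connection back to the pool:
+ #
+ #     r = http.request('GET', url, preload_content=False)
+ #     for chunk in r.stream(1024):
+ #         handle(chunk)
+ #     r.release_conn()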
+
+ @classmethod
+ def from_httplib(ResponseCls, r, **response_kw):
+ """
+ Given an :class:`httplib.HTTPResponse` instance ``r``, return a
+ corresponding :class:`urllib3.response.HTTPResponse` object.
+
+ Remaining parameters are passed to the HTTPResponse constructor, along
+ with ``original_response=r``.
+ """
+ headers = r.msg
+
+ if not isinstance(headers, HTTPHeaderDict):
+ if PY3: # Python 3
+ headers = HTTPHeaderDict(headers.items())
+ else: # Python 2
+ headers = HTTPHeaderDict.from_httplib(headers)
+
+ # HTTPResponse objects in Python 3 don't have a .strict attribute
+ strict = getattr(r, 'strict', 0)
+ resp = ResponseCls(body=r,
+ headers=headers,
+ status=r.status,
+ version=r.version,
+ reason=r.reason,
+ strict=strict,
+ original_response=r,
+ **response_kw)
+ return resp
+
+ # Backwards-compatibility methods for httplib.HTTPResponse
+ def getheaders(self):
+ return self.headers
+
+ def getheader(self, name, default=None):
+ return self.headers.get(name, default)
+
+ # Overrides from io.IOBase
+ def close(self):
+ if not self.closed:
+ self._fp.close()
+
+ if self._connection:
+ self._connection.close()
+
+ @property
+ def closed(self):
+ if self._fp is None:
+ return True
+ elif hasattr(self._fp, 'isclosed'):
+ return self._fp.isclosed()
+ elif hasattr(self._fp, 'closed'):
+ return self._fp.closed
+ else:
+ return True
+
+ def fileno(self):
+ if self._fp is None:
+ raise IOError("HTTPResponse has no file to get a fileno from")
+ elif hasattr(self._fp, "fileno"):
+ return self._fp.fileno()
+ else:
+ raise IOError("The file-like object this HTTPResponse is wrapped "
+ "around has no file descriptor")
+
+ def flush(self):
+ if self._fp is not None and hasattr(self._fp, 'flush'):
+ return self._fp.flush()
+
+ def readable(self):
+ # This method is required for `io` module compatibility.
+ return True
+
+ def readinto(self, b):
+ # This method is required for `io` module compatibility.
+ temp = self.read(len(b))
+ if len(temp) == 0:
+ return 0
+ else:
+ b[:len(temp)] = temp
+ return len(temp)
+
+ def supports_chunked_reads(self):
+ """
+ Checks if the underlying file-like object looks like a
+ httplib.HTTPResponse object. We do this by testing for the fp
+ attribute. If it is present we assume it returns raw chunks as
+ processed by read_chunked().
+ """
+ return hasattr(self._fp, 'fp')
+
+ def _update_chunk_length(self):
+ # First, we'll figure out length of a chunk and then
+ # we'll try to read it from socket.
+ if self.chunk_left is not None:
+ return
+ line = self._fp.fp.readline()
+ line = line.split(b';', 1)[0]
+ try:
+ self.chunk_left = int(line, 16)
+ except ValueError:
+ # Invalid chunked protocol response, abort.
+ self.close()
+ raise httplib.IncompleteRead(line)
+
+ def _handle_chunk(self, amt):
+ returned_chunk = None
+ if amt is None:
+ chunk = self._fp._safe_read(self.chunk_left)
+ returned_chunk = chunk
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ elif amt < self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self.chunk_left = self.chunk_left - amt
+ returned_chunk = value
+ elif amt == self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ returned_chunk = value
+ else: # amt > self.chunk_left
+ returned_chunk = self._fp._safe_read(self.chunk_left)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ return returned_chunk
+
+ def read_chunked(self, amt=None, decode_content=None):
+ """
+ Similar to :meth:`HTTPResponse.read`, but with an additional
+ parameter: ``decode_content``.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ self._init_decoder()
+ # FIXME: Rewrite this method and make it a class with a better structured logic.
+ if not self.chunked:
+ raise ResponseNotChunked(
+ "Response is not chunked. "
+ "Header 'transfer-encoding: chunked' is missing.")
+ if not self.supports_chunked_reads():
+ raise BodyNotHttplibCompatible(
+ "Body should be httplib.HTTPResponse like. "
+ "It should have have an fp attribute which returns raw chunks.")
+
+ # Don't bother reading the body of a HEAD request.
+ if self._original_response and is_response_to_head(self._original_response):
+ self._original_response.close()
+ return
+
+ with self._error_catcher():
+ while True:
+ self._update_chunk_length()
+ if self.chunk_left == 0:
+ break
+ chunk = self._handle_chunk(amt)
+ decoded = self._decode(chunk, decode_content=decode_content,
+ flush_decoder=False)
+ if decoded:
+ yield decoded
+
+ if decode_content:
+ # On CPython and PyPy, we should never need to flush the
+ # decoder. However, on Jython we *might* need to, so
+ # lets defensively do it anyway.
+ decoded = self._flush_decoder()
+ if decoded: # Platform-specific: Jython.
+ yield decoded
+
+ # Chunk content ends with \r\n: discard it.
+ while True:
+ line = self._fp.fp.readline()
+ if not line:
+ # Some sites may not end with '\r\n'.
+ break
+ if line == b'\r\n':
+ break
+
+ # We read everything; close the "file".
+ if self._original_response:
+ self._original_response.close()
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
new file mode 100644
index 0000000..bba628d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import make_headers
+from .response import is_fp_closed
+from .ssl_ import (
+ SSLContext,
+ HAS_SNI,
+ IS_PYOPENSSL,
+ IS_SECURETRANSPORT,
+ assert_fingerprint,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .timeout import (
+ current_time,
+ Timeout,
+)
+
+from .retry import Retry
+from .url import (
+ get_host,
+ parse_url,
+ split_first,
+ Url,
+)
+from .wait import (
+ wait_for_read,
+ wait_for_write
+)
+
+__all__ = (
+ 'HAS_SNI',
+ 'IS_PYOPENSSL',
+ 'IS_SECURETRANSPORT',
+ 'SSLContext',
+ 'Retry',
+ 'Timeout',
+ 'Url',
+ 'assert_fingerprint',
+ 'current_time',
+ 'is_connection_dropped',
+ 'is_fp_closed',
+ 'get_host',
+ 'parse_url',
+ 'make_headers',
+ 'resolve_cert_reqs',
+ 'resolve_ssl_version',
+ 'split_first',
+ 'ssl_wrap_socket',
+ 'wait_for_read',
+ 'wait_for_write'
+)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
new file mode 100644
index 0000000..3bd69e8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import socket
+from .wait import wait_for_read
+from .selectors import HAS_SELECT, SelectorError
+
+
+def is_connection_dropped(conn): # Platform-specific
+ """
+ Returns True if the connection is dropped and should be closed.
+
+ :param conn:
+ :class:`httplib.HTTPConnection` object.
+
+ Note: For platforms like AppEngine, this will always return ``False`` to
+ let the platform handle connection recycling transparently for us.
+ """
+ sock = getattr(conn, 'sock', False)
+ if sock is False: # Platform-specific: AppEngine
+ return False
+ if sock is None: # Connection already closed (such as by httplib).
+ return True
+
+ if not HAS_SELECT:
+ return False
+
+ try:
+ return bool(wait_for_read(sock, timeout=0.0))
+ except SelectorError:
+ return True
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. Added to its signature is only `socket_options`.
+# One additional modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
+def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None, socket_options=None):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`getdefaulttimeout`
+ is used. If *source_address* is set it must be a tuple of (host, port)
+ for the socket to bind as a source address before making the connection.
+ A host of '' or port 0 tells the OS to use the default.
+ """
+
+ host, port = address
+ if host.startswith('['):
+ host = host.strip('[]')
+ err = None
+
+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+ # The original create_connection function always returns all records.
+ family = allowed_gai_family()
+
+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+
+ # If provided, set socket level options before connecting.
+ _set_socket_options(sock, socket_options)
+
+ if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except socket.error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+ sock = None
+
+ if err is not None:
+ raise err
+
+ raise socket.error("getaddrinfo returns an empty list")
+
+
+def _set_socket_options(sock, options):
+ if options is None:
+ return
+
+ for opt in options:
+ sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+ """This function is designed to work in the context of
+ getaddrinfo, where family=socket.AF_UNSPEC is the default and
+ will perform a DNS search for both IPv6 and IPv4 records."""
+
+ family = socket.AF_INET
+ if HAS_IPV6:
+ family = socket.AF_UNSPEC
+ return family
+
+
+def _has_ipv6(host):
+ """ Returns True if the system can bind an IPv6 address. """
+ sock = None
+ has_ipv6 = False
+
+ if socket.has_ipv6:
+ # has_ipv6 returns true if cPython was compiled with IPv6 support.
+ # It does not tell us if the system has IPv6 support enabled. To
+ # determine that we must bind to an IPv6 address.
+ # https://github.com/shazow/urllib3/pull/611
+ # https://bugs.python.org/issue658327
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind((host, 0))
+ has_ipv6 = True
+ except Exception:
+ pass
+
+ if sock:
+ sock.close()
+ return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6('::1')
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
new file mode 100644
index 0000000..18f27b0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from base64 import b64encode
+
+from ..packages.six import b, integer_types
+from ..exceptions import UnrewindableBodyError
+
+ACCEPT_ENCODING = 'gzip,deflate'
+_FAILEDTELL = object()
+
+
+def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
+ basic_auth=None, proxy_basic_auth=None, disable_cache=None):
+ """
+ Shortcuts for generating request headers.
+
+ :param keep_alive:
+ If ``True``, adds 'connection: keep-alive' header.
+
+ :param accept_encoding:
+ Can be a boolean, list, or string.
+ ``True`` translates to 'gzip,deflate'.
+ List will get joined by comma.
+ String will be used as provided.
+
+ :param user_agent:
+ String representing the user-agent you want, such as
+ "python-urllib3/0.6"
+
+ :param basic_auth:
+ Colon-separated username:password string for 'authorization: basic ...'
+ auth header.
+
+ :param proxy_basic_auth:
+ Colon-separated username:password string for 'proxy-authorization: basic ...'
+ auth header.
+
+ :param disable_cache:
+ If ``True``, adds 'cache-control: no-cache' header.
+
+ Example::
+
+ >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+ {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+ >>> make_headers(accept_encoding=True)
+ {'accept-encoding': 'gzip,deflate'}
+ """
+ headers = {}
+ if accept_encoding:
+ if isinstance(accept_encoding, str):
+ pass
+ elif isinstance(accept_encoding, list):
+ accept_encoding = ','.join(accept_encoding)
+ else:
+ accept_encoding = ACCEPT_ENCODING
+ headers['accept-encoding'] = accept_encoding
+
+ if user_agent:
+ headers['user-agent'] = user_agent
+
+ if keep_alive:
+ headers['connection'] = 'keep-alive'
+
+ if basic_auth:
+ headers['authorization'] = 'Basic ' + \
+ b64encode(b(basic_auth)).decode('utf-8')
+
+ if proxy_basic_auth:
+ headers['proxy-authorization'] = 'Basic ' + \
+ b64encode(b(proxy_basic_auth)).decode('utf-8')
+
+ if disable_cache:
+ headers['cache-control'] = 'no-cache'
+
+ return headers
+
+
+def set_file_position(body, pos):
+ """
+ If a position is provided, move file to that point.
+ Otherwise, we'll attempt to record a position for future use.
+ """
+ if pos is not None:
+ rewind_body(body, pos)
+ elif getattr(body, 'tell', None) is not None:
+ try:
+ pos = body.tell()
+ except (IOError, OSError):
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body.
+ pos = _FAILEDTELL
+
+ return pos
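+
+# A minimal usage sketch (editor's addition): record the body position
+# before the first send, then rewind before a redirect/retry re-send:
+#
+#     body = open('payload.bin', 'rb')
+#     pos = set_file_position(body, None)  # records body.tell()
+#     # ... first attempt fails and is retried ...
+#     rewind_body(body, pos)               # seek back before re-sending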
+
+
+def rewind_body(body, body_pos):
+ """
+ Attempt to rewind body to a certain position.
+ Primarily used for request redirects and retries.
+
+ :param body:
+ File-like object that supports seek.
+
+ :param int body_pos:
+ Position to seek to in file.
+ """
+ body_seek = getattr(body, 'seek', None)
+ if body_seek is not None and isinstance(body_pos, integer_types):
+ try:
+ body_seek(body_pos)
+ except (IOError, OSError):
+ raise UnrewindableBodyError("An error occurred when rewinding request "
+ "body for redirect/retry.")
+ elif body_pos is _FAILEDTELL:
+ raise UnrewindableBodyError("Unable to record file position for rewinding "
+ "request body during a redirect/retry.")
+ else:
+ raise ValueError("body_pos must be of type integer, "
+ "instead it was %s." % type(body_pos))
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
new file mode 100644
index 0000000..e4cda93
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from ..packages.six.moves import http_client as httplib
+
+from ..exceptions import HeaderParsingError
+
+
+def is_fp_closed(obj):
+ """
+ Checks whether a given file-like object is closed.
+
+ :param obj:
+ The file-like object to check.
+ """
+
+ try:
+ # Check `isclosed()` first, in case Python3 doesn't set `closed`.
+ # GH Issue #928
+ return obj.isclosed()
+ except AttributeError:
+ pass
+
+ try:
+ # Check via the official file-like-object way.
+ return obj.closed
+ except AttributeError:
+ pass
+
+ try:
+ # Check if the object is a container for another file-like object that
+ # gets released on exhaustion (e.g. HTTPResponse).
+ return obj.fp is None
+ except AttributeError:
+ pass
+
+ raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+ """
+ Asserts whether all headers have been successfully parsed.
+ Extracts encountered errors from the result of parsing headers.
+
+ Only works on Python 3.
+
+ :param headers: Headers to verify.
+ :type headers: `httplib.HTTPMessage`.
+
+ :raises urllib3.exceptions.HeaderParsingError:
+ If parsing errors are found.
+ """
+
+ # This will fail silently if we pass in the wrong kind of parameter.
+ # To make debugging easier add an explicit check.
+ if not isinstance(headers, httplib.HTTPMessage):
+ raise TypeError('expected httplib.HTTPMessage, got {0}.'.format(
+ type(headers)))
+
+ defects = getattr(headers, 'defects', None)
+ get_payload = getattr(headers, 'get_payload', None)
+
+ unparsed_data = None
+ if get_payload: # Platform-specific: Python 3.
+ unparsed_data = get_payload()
+
+ if defects or unparsed_data:
+ raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+ """
+ Checks whether the request of a response has been a HEAD-request.
+ Handles the quirks of AppEngine.
+
+ :param response:
+ :type response: :class:`httplib.HTTPResponse`
+ """
+ # FIXME: Can we do this somehow without accessing private httplib _method?
+ method = response._method
+ if isinstance(method, int): # Platform-specific: Appengine
+ return method == 3
+ return method.upper() == 'HEAD'
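+
+
+# A short doctest-style sketch (io.BytesIO stands in for a response body):
+# is_fp_closed() accepts anything exposing isclosed(), closed, or fp.
+#
+#     >>> import io
+#     >>> buf = io.BytesIO(b'x')
+#     >>> is_fp_closed(buf)
+#     False
+#     >>> buf.close()
+#     >>> is_fp_closed(buf)
+#     True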
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
new file mode 100644
index 0000000..61e63af
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
@@ -0,0 +1,402 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import time
+import logging
+from collections import namedtuple
+from itertools import takewhile
+import email
+import re
+
+from ..exceptions import (
+ ConnectTimeoutError,
+ MaxRetryError,
+ ProtocolError,
+ ReadTimeoutError,
+ ResponseError,
+ InvalidHeader,
+)
+from ..packages import six
+
+
+log = logging.getLogger(__name__)
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
+ "status", "redirect_location"])
+
+
+class Retry(object):
+ """ Retry configuration.
+
+ Each retry attempt will create a new Retry object with updated values, so
+ they can be safely reused.
+
+ Retries can be defined as a default for a pool::
+
+ retries = Retry(connect=5, read=2, redirect=5)
+ http = PoolManager(retries=retries)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+ Retries can be disabled by passing ``False``::
+
+ response = http.request('GET', 'http://example.com/', retries=False)
+
+ Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+ retries are disabled, in which case the causing exception will be raised.
+
+ :param int total:
+ Total number of retries to allow. Takes precedence over other counts.
+
+ Set to ``None`` to remove this constraint and fall back on other
+ counts. It's a good idea to set this to some sensibly-high value to
+ account for unexpected edge cases and avoid infinite retry loops.
+
+ Set to ``0`` to fail on the first retry.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int connect:
+ How many connection-related errors to retry on.
+
+ These are errors raised before the request is sent to the remote server,
+ so we assume the server has not begun processing the request.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int read:
+ How many times to retry on read errors.
+
+ These errors are raised after the request was sent to the server, so the
+ request may have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int redirect:
+ How many redirects to perform. Limit this to avoid infinite redirect
+ loops.
+
+ A redirect is an HTTP response with a status code 301, 302, 303, 307 or
+ 308.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int status:
+ How many times to retry on bad status codes.
+
+ These are retries made on responses, where status code matches
+ ``status_forcelist``.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param iterable method_whitelist:
+ Set of uppercased HTTP method verbs that we should retry on.
+
+ By default, we only retry on methods which are considered to be
+ idempotent (multiple requests with the same parameters end with the
+ same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
+
+ Set to a ``False`` value to retry on any verb.
+
+ :param iterable status_forcelist:
+ A set of integer HTTP status codes that we should force a retry on.
+ A retry is initiated if the request method is in ``method_whitelist``
+ and the response status code is in ``status_forcelist``.
+
+ By default, this is disabled with ``None``.
+
+ :param float backoff_factor:
+ A backoff factor to apply between attempts after the second try
+ (most errors are resolved immediately by a second try without a
+ delay). urllib3 will sleep for::
+
+ {backoff factor} * (2 ^ ({number of total retries} - 1))
+
+ seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+ for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+ than :attr:`Retry.BACKOFF_MAX`.
+
+ By default, backoff is disabled (set to 0).
+
+ :param bool raise_on_redirect: Whether, if the number of redirects is
+ exhausted, to raise a MaxRetryError, or to return a response with a
+ response code in the 3xx range.
+
+ :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+ whether we should raise an exception, or return a response,
+ if status falls in ``status_forcelist`` range and retries have
+ been exhausted.
+
+ :param tuple history: The history of the request encountered during
+ each call to :meth:`~Retry.increment`. The list is in the order
+ the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+ :param bool respect_retry_after_header:
+ Whether to respect Retry-After header on status codes defined as
+ :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
+
+ """
+
+ DEFAULT_METHOD_WHITELIST = frozenset([
+ 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
+
+ RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+ #: Maximum backoff time.
+ BACKOFF_MAX = 120
+
+ def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
+ method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
+ backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
+ history=None, respect_retry_after_header=True):
+
+ self.total = total
+ self.connect = connect
+ self.read = read
+ self.status = status
+
+ if redirect is False or total is False:
+ redirect = 0
+ raise_on_redirect = False
+
+ self.redirect = redirect
+ self.status_forcelist = status_forcelist or set()
+ self.method_whitelist = method_whitelist
+ self.backoff_factor = backoff_factor
+ self.raise_on_redirect = raise_on_redirect
+ self.raise_on_status = raise_on_status
+ self.history = history or tuple()
+ self.respect_retry_after_header = respect_retry_after_header
+
+ def new(self, **kw):
+ params = dict(
+ total=self.total,
+ connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
+ method_whitelist=self.method_whitelist,
+ status_forcelist=self.status_forcelist,
+ backoff_factor=self.backoff_factor,
+ raise_on_redirect=self.raise_on_redirect,
+ raise_on_status=self.raise_on_status,
+ history=self.history,
+ )
+ params.update(kw)
+ return type(self)(**params)
+
+ @classmethod
+ def from_int(cls, retries, redirect=True, default=None):
+ """ Backwards-compatibility for the old retries format."""
+ if retries is None:
+ retries = default if default is not None else cls.DEFAULT
+
+ if isinstance(retries, Retry):
+ return retries
+
+ redirect = bool(redirect) and None
+ new_retries = cls(retries, redirect=redirect)
+ log.debug("Converted retries value: %r -> %r", retries, new_retries)
+ return new_retries
+
+ def get_backoff_time(self):
+ """ Formula for computing the current backoff
+
+ :rtype: float
+ """
+ # We want to consider only the last consecutive errors sequence (Ignore redirects).
+ consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
+ reversed(self.history))))
+ if consecutive_errors_len <= 1:
+ return 0
+
+ backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+ return min(self.BACKOFF_MAX, backoff_value)
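+
+ # A worked example of the formula above (values are illustrative):
+ # two consecutive errors in self.history with backoff_factor=0.25
+ # give 0.25 * (2 ** (2 - 1)) == 0.5 seconds, capped at BACKOFF_MAX.
+ #
+ #     >>> hist = (RequestHistory('GET', '/', None, None, None),) * 2
+ #     >>> Retry(backoff_factor=0.25, history=hist).get_backoff_time()
+ #     0.5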
+
+ def parse_retry_after(self, retry_after):
+ # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ if re.match(r"^\s*[0-9]+\s*$", retry_after):
+ seconds = int(retry_after)
+ else:
+ retry_date_tuple = email.utils.parsedate(retry_after)
+ if retry_date_tuple is None:
+ raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+ retry_date = time.mktime(retry_date_tuple)
+ seconds = retry_date - time.time()
+
+ if seconds < 0:
+ seconds = 0
+
+ return seconds
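+
+ # Both RFC 7231 Retry-After forms are accepted; a sketch of the
+ # delta-seconds form (the HTTP-date form depends on the current clock):
+ #
+ #     >>> Retry().parse_retry_after('120')
+ #     120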
+
+ def get_retry_after(self, response):
+ """ Get the value of Retry-After in seconds. """
+
+ retry_after = response.getheader("Retry-After")
+
+ if retry_after is None:
+ return None
+
+ return self.parse_retry_after(retry_after)
+
+ def sleep_for_retry(self, response=None):
+ retry_after = self.get_retry_after(response)
+ if retry_after:
+ time.sleep(retry_after)
+ return True
+
+ return False
+
+ def _sleep_backoff(self):
+ backoff = self.get_backoff_time()
+ if backoff <= 0:
+ return
+ time.sleep(backoff)
+
+ def sleep(self, response=None):
+ """ Sleep between retry attempts.
+
+ This method will respect a server's ``Retry-After`` response header
+ and sleep the duration of the time requested. If that is not present, it
+ will use an exponential backoff. By default, the backoff factor is 0 and
+ this method will return immediately.
+ """
+
+ if response:
+ slept = self.sleep_for_retry(response)
+ if slept:
+ return
+
+ self._sleep_backoff()
+
+ def _is_connection_error(self, err):
+ """ Errors when we're fairly sure that the server did not receive the
+ request, so it should be safe to retry.
+ """
+ return isinstance(err, ConnectTimeoutError)
+
+ def _is_read_error(self, err):
+ """ Errors that occur after the request has been started, so we should
+ assume that the server began processing it.
+ """
+ return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+ def _is_method_retryable(self, method):
+ """ Checks if a given HTTP method should be retried upon, depending if
+ it is included on the method whitelist.
+ """
+ if self.method_whitelist and method.upper() not in self.method_whitelist:
+ return False
+
+ return True
+
+ def is_retry(self, method, status_code, has_retry_after=False):
+ """ Is this method/status code retryable? (Based on whitelists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+ be retried upon in the presence of the aforementioned header).
+ """
+ if not self._is_method_retryable(method):
+ return False
+
+ if self.status_forcelist and status_code in self.status_forcelist:
+ return True
+
+ return (self.total and self.respect_retry_after_header and
+ has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
+
+ def is_exhausted(self):
+ """ Are we out of retries? """
+ retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+
+ return min(retry_counts) < 0
+
+ def increment(self, method=None, url=None, response=None, error=None,
+ _pool=None, _stacktrace=None):
+ """ Return a new Retry object with incremented retry counters.
+
+ :param response: A response object, or None, if the server did not
+ return a response.
+ :type response: :class:`~urllib3.response.HTTPResponse`
+ :param Exception error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: A new ``Retry`` object.
+ """
+ if self.total is False and error:
+ # Disabled, indicate to re-raise the error.
+ raise six.reraise(type(error), error, _stacktrace)
+
+ total = self.total
+ if total is not None:
+ total -= 1
+
+ connect = self.connect
+ read = self.read
+ redirect = self.redirect
+ status_count = self.status
+ cause = 'unknown'
+ status = None
+ redirect_location = None
+
+ if error and self._is_connection_error(error):
+ # Connect retry?
+ if connect is False:
+ raise six.reraise(type(error), error, _stacktrace)
+ elif connect is not None:
+ connect -= 1
+
+ elif error and self._is_read_error(error):
+ # Read retry?
+ if read is False or not self._is_method_retryable(method):
+ raise six.reraise(type(error), error, _stacktrace)
+ elif read is not None:
+ read -= 1
+
+ elif response and response.get_redirect_location():
+ # Redirect retry?
+ if redirect is not None:
+ redirect -= 1
+ cause = 'too many redirects'
+ redirect_location = response.get_redirect_location()
+ status = response.status
+
+ else:
+ # Incrementing because of a server error, e.g. a 500 listed in
+ # status_forcelist, when the given method is in the whitelist.
+ cause = ResponseError.GENERIC_ERROR
+ if response and response.status:
+ if status_count is not None:
+ status_count -= 1
+ cause = ResponseError.SPECIFIC_ERROR.format(
+ status_code=response.status)
+ status = response.status
+
+ history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
+
+ new_retry = self.new(
+ total=total,
+ connect=connect, read=read, redirect=redirect, status=status_count,
+ history=history)
+
+ if new_retry.is_exhausted():
+ raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+ log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+ return new_retry
+
+ def __repr__(self):
+ return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
+ 'read={self.read}, redirect={self.redirect}, status={self.status})').format(
+ cls=type(self), self=self)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
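+
+
+# A small usage sketch (method and url are illustrative): increment()
+# returns a fresh Retry with one counter consumed rather than mutating.
+#
+#     >>> r = Retry(total=2)
+#     >>> r.increment(method='GET', url='/').total
+#     1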
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
new file mode 100644
index 0000000..c0997b1
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
@@ -0,0 +1,582 @@
+# SPDX-License-Identifier: MIT
+# Backport of selectors.py from Python 3.5+ to support Python < 3.4
+# Also has the behavior specified in PEP 475 which is to retry syscalls
+# in the case of an EINTR error. This module is required because selectors34
+# does not follow this behavior and instead returns that no file descriptor
+# events have occurred rather than retry the syscall. The decision to drop
+# support for select.devpoll is made to maintain 100% test coverage.
+
+import errno
+import math
+import select
+import socket
+import sys
+import time
+from collections import namedtuple, Mapping
+
+try:
+ monotonic = time.monotonic
+except (AttributeError, ImportError): # Python < 3.3
+ monotonic = time.time
+
+EVENT_READ = (1 << 0)
+EVENT_WRITE = (1 << 1)
+
+HAS_SELECT = True # Variable that shows whether the platform has a selector.
+_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
+_DEFAULT_SELECTOR = None
+
+
+class SelectorError(Exception):
+ def __init__(self, errcode):
+ super(SelectorError, self).__init__()
+ self.errno = errcode
+
+ def __repr__(self):
+ return "<SelectorError errno={0}>".format(self.errno)
+
+ def __str__(self):
+ return self.__repr__()
+
+
+def _fileobj_to_fd(fileobj):
+ """ Return a file descriptor from a file object. If
+ given an integer, simply returns that integer back. """
+ if isinstance(fileobj, int):
+ fd = fileobj
+ else:
+ try:
+ fd = int(fileobj.fileno())
+ except (AttributeError, TypeError, ValueError):
+ raise ValueError("Invalid file object: {0!r}".format(fileobj))
+ if fd < 0:
+ raise ValueError("Invalid file descriptor: {0}".format(fd))
+ return fd
+
+
+# Determine which function to use to wrap system calls because Python 3.5+
+# already handles the case when system calls are interrupted.
+if sys.version_info >= (3, 5):
+ def _syscall_wrapper(func, _, *args, **kwargs):
+ """ This is the short-circuit version of the below logic
+ because in Python 3.5+ all system calls automatically restart
+ and recalculate their timeouts. """
+ try:
+ return func(*args, **kwargs)
+ except (OSError, IOError, select.error) as e:
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ raise SelectorError(errcode)
+else:
+ def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
+ """ Wrapper function for syscalls that could fail due to EINTR.
+ All functions should be retried if there is time left in the timeout
+ in accordance with PEP 475. """
+ timeout = kwargs.get("timeout", None)
+ if timeout is None:
+ expires = None
+ recalc_timeout = False
+ else:
+ timeout = float(timeout)
+ if timeout < 0.0: # Timeout less than 0 treated as no timeout.
+ expires = None
+ else:
+ expires = monotonic() + timeout
+
+ args = list(args)
+ if recalc_timeout and "timeout" not in kwargs:
+ raise ValueError(
+ "Timeout must be in args or kwargs to be recalculated")
+
+ result = _SYSCALL_SENTINEL
+ while result is _SYSCALL_SENTINEL:
+ try:
+ result = func(*args, **kwargs)
+ # OSError is thrown by select.select
+ # IOError is thrown by select.epoll.poll
+ # select.error is thrown by select.poll.poll
+ # Aren't we thankful for Python 3.x rework for exceptions?
+ except (OSError, IOError, select.error) as e:
+ # select.error wasn't a subclass of OSError in the past.
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ elif hasattr(e, "args"):
+ errcode = e.args[0]
+
+ # Also test for the Windows equivalent of EINTR.
+ is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
+ errcode == errno.WSAEINTR))
+
+ if is_interrupt:
+ if expires is not None:
+ current_time = monotonic()
+ if current_time > expires:
+ raise OSError(errno.ETIMEDOUT, "Connection timed out")
+ if recalc_timeout:
+ if "timeout" in kwargs:
+ kwargs["timeout"] = expires - current_time
+ continue
+ if errcode:
+ raise SelectorError(errcode)
+ else:
+ raise
+ return result
+
+
+SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
+
+
+class _SelectorMapping(Mapping):
+ """ Mapping of file objects to selector keys """
+
+ def __init__(self, selector):
+ self._selector = selector
+
+ def __len__(self):
+ return len(self._selector._fd_to_key)
+
+ def __getitem__(self, fileobj):
+ try:
+ fd = self._selector._fileobj_lookup(fileobj)
+ return self._selector._fd_to_key[fd]
+ except KeyError:
+ raise KeyError("{0!r} is not registered.".format(fileobj))
+
+ def __iter__(self):
+ return iter(self._selector._fd_to_key)
+
+
+class BaseSelector(object):
+ """ Abstract Selector class
+
+ A selector supports registering file objects to be monitored
+ for specific I/O events.
+
+ A file object is a file descriptor or any object with a
+ `fileno()` method. An arbitrary object can be attached to the
+ file object which can be used for example to store context info,
+ a callback, etc.
+
+ A selector can use various implementations (select(), poll(), epoll(),
+ and kqueue()) depending on the platform. The 'DefaultSelector' class uses
+ the most efficient implementation for the current platform.
+ """
+ def __init__(self):
+ # Maps file descriptors to keys.
+ self._fd_to_key = {}
+
+ # Read-only mapping returned by get_map()
+ self._map = _SelectorMapping(self)
+
+ def _fileobj_lookup(self, fileobj):
+ """ Return a file descriptor from a file object.
+ This wraps _fileobj_to_fd() to do an exhaustive
+ search in case the object is invalid but we still
+ have it in our map. Used by unregister() so we can
+ unregister an object that was previously registered
+ even if it is closed. It is also used by _SelectorMapping.
+ """
+ try:
+ return _fileobj_to_fd(fileobj)
+ except ValueError:
+
+ # Search through all our mapped keys.
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ return key.fd
+
+ # Raise ValueError after all.
+ raise
+
+ def register(self, fileobj, events, data=None):
+ """ Register a file object for a set of events to monitor. """
+ if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
+ raise ValueError("Invalid events: {0!r}".format(events))
+
+ key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
+
+ if key.fd in self._fd_to_key:
+ raise KeyError("{0!r} (FD {1}) is already registered"
+ .format(fileobj, key.fd))
+
+ self._fd_to_key[key.fd] = key
+ return key
+
+ def unregister(self, fileobj):
+ """ Unregister a file object from being monitored. """
+ try:
+ key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ # Getting the fileno of a closed socket on Windows errors with EBADF.
+ except socket.error as e: # Platform-specific: Windows.
+ if e.errno != errno.EBADF:
+ raise
+ else:
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ self._fd_to_key.pop(key.fd)
+ break
+ else:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+ return key
+
+ def modify(self, fileobj, events, data=None):
+ """ Change a registered file object monitored events and data. """
+ # NOTE: Some subclasses optimize this operation even further.
+ try:
+ key = self._fd_to_key[self._fileobj_lookup(fileobj)]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ if events != key.events:
+ self.unregister(fileobj)
+ key = self.register(fileobj, events, data)
+
+ elif data != key.data:
+ # Use a shortcut to update the data.
+ key = key._replace(data=data)
+ self._fd_to_key[key.fd] = key
+
+ return key
+
+ def select(self, timeout=None):
+ """ Perform the actual selection until some monitored file objects
+ are ready or the timeout expires. """
+ raise NotImplementedError()
+
+ def close(self):
+ """ Close the selector. This must be called to ensure that all
+ underlying resources are freed. """
+ self._fd_to_key.clear()
+ self._map = None
+
+ def get_key(self, fileobj):
+ """ Return the key associated with a registered file object. """
+ mapping = self.get_map()
+ if mapping is None:
+ raise RuntimeError("Selector is closed")
+ try:
+ return mapping[fileobj]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ def get_map(self):
+ """ Return a mapping of file objects to selector keys """
+ return self._map
+
+ def _key_from_fd(self, fd):
+ """ Return the key associated to a given file descriptor
+ Return None if it is not found. """
+ try:
+ return self._fd_to_key[fd]
+ except KeyError:
+ return None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
+# Almost all platforms have select.select()
+if hasattr(select, "select"):
+ class SelectSelector(BaseSelector):
+ """ Select-based selector. """
+ def __init__(self):
+ super(SelectSelector, self).__init__()
+ self._readers = set()
+ self._writers = set()
+
+ def register(self, fileobj, events, data=None):
+ key = super(SelectSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ self._readers.add(key.fd)
+ if events & EVENT_WRITE:
+ self._writers.add(key.fd)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(SelectSelector, self).unregister(fileobj)
+ self._readers.discard(key.fd)
+ self._writers.discard(key.fd)
+ return key
+
+ def _select(self, r, w, timeout=None):
+ """ Wrapper for select.select because timeout is a positional arg """
+ return select.select(r, w, [], timeout)
+
+ def select(self, timeout=None):
+ # Selecting on empty lists on Windows errors out.
+ if not len(self._readers) and not len(self._writers):
+ return []
+
+ timeout = None if timeout is None else max(timeout, 0.0)
+ ready = []
+ r, w, _ = _syscall_wrapper(self._select, True, self._readers,
+ self._writers, timeout)
+ r = set(r)
+ w = set(w)
+ for fd in r | w:
+ events = 0
+ if fd in r:
+ events |= EVENT_READ
+ if fd in w:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+
+if hasattr(select, "poll"):
+ class PollSelector(BaseSelector):
+ """ Poll-based selector """
+ def __init__(self):
+ super(PollSelector, self).__init__()
+ self._poll = select.poll()
+
+ def register(self, fileobj, events, data=None):
+ key = super(PollSelector, self).register(fileobj, events, data)
+ event_mask = 0
+ if events & EVENT_READ:
+ event_mask |= select.POLLIN
+ if events & EVENT_WRITE:
+ event_mask |= select.POLLOUT
+ self._poll.register(key.fd, event_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(PollSelector, self).unregister(fileobj)
+ self._poll.unregister(key.fd)
+ return key
+
+ def _wrap_poll(self, timeout=None):
+ """ Wrapper function for select.poll.poll() so that
+ _syscall_wrapper can work with only seconds. """
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0
+ else:
+ # select.poll.poll() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ timeout = math.ceil(timeout * 1e3)
+
+ result = self._poll.poll(timeout)
+ return result
+
+ def select(self, timeout=None):
+ ready = []
+ fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.POLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.POLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+
+ return ready
+
+
+if hasattr(select, "epoll"):
+ class EpollSelector(BaseSelector):
+ """ Epoll-based selector """
+ def __init__(self):
+ super(EpollSelector, self).__init__()
+ self._epoll = select.epoll()
+
+ def fileno(self):
+ return self._epoll.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(EpollSelector, self).register(fileobj, events, data)
+ events_mask = 0
+ if events & EVENT_READ:
+ events_mask |= select.EPOLLIN
+ if events & EVENT_WRITE:
+ events_mask |= select.EPOLLOUT
+ _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(EpollSelector, self).unregister(fileobj)
+ try:
+ _syscall_wrapper(self._epoll.unregister, False, key.fd)
+ except SelectorError:
+ # This can occur when the fd was closed since registration.
+ pass
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0.0
+ else:
+ # select.epoll.poll() has a resolution of 1 millisecond
+ # but luckily takes seconds, so we don't need a wrapper
+ # like PollSelector; we round up only for consistency.
+ timeout = math.ceil(timeout * 1e3) * 1e-3
+ timeout = float(timeout)
+ else:
+ timeout = -1.0 # epoll.poll() must have a float.
+
+ # We always want at least 1 to ensure that select can be called
+ # with no file descriptors registered. Otherwise it will fail.
+ max_events = max(len(self._fd_to_key), 1)
+
+ ready = []
+ fd_events = _syscall_wrapper(self._epoll.poll, True,
+ timeout=timeout,
+ maxevents=max_events)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.EPOLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.EPOLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+ def close(self):
+ self._epoll.close()
+ super(EpollSelector, self).close()
+
+
+if hasattr(select, "kqueue"):
+ class KqueueSelector(BaseSelector):
+ """ Kqueue / Kevent-based selector """
+ def __init__(self):
+ super(KqueueSelector, self).__init__()
+ self._kqueue = select.kqueue()
+
+ def fileno(self):
+ return self._kqueue.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(KqueueSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ if events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ return key
+
+ def unregister(self, fileobj):
+ key = super(KqueueSelector, self).unregister(fileobj)
+ if key.events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+ if key.events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ timeout = max(timeout, 0)
+
+ max_events = len(self._fd_to_key) * 2
+ ready_fds = {}
+
+ kevent_list = _syscall_wrapper(self._kqueue.control, True,
+ None, max_events, timeout)
+
+ for kevent in kevent_list:
+ fd = kevent.ident
+ event_mask = kevent.filter
+ events = 0
+ if event_mask == select.KQ_FILTER_READ:
+ events |= EVENT_READ
+ if event_mask == select.KQ_FILTER_WRITE:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ if key.fd not in ready_fds:
+ ready_fds[key.fd] = (key, events & key.events)
+ else:
+ old_events = ready_fds[key.fd][1]
+ ready_fds[key.fd] = (key, (events | old_events) & key.events)
+
+ return list(ready_fds.values())
+
+ def close(self):
+ self._kqueue.close()
+ super(KqueueSelector, self).close()
+
+
+if not hasattr(select, 'select'): # Platform-specific: AppEngine
+ HAS_SELECT = False
+
+
+def _can_allocate(struct):
+ """ Checks that select structs can be allocated by the underlying
+ operating system, not just advertised by the select module. We don't
+ check select() because we'll be hopeful that most platforms that
+ don't have it available will not advertise it (i.e. GAE). """
+ try:
+ # select.poll() objects won't fail until used.
+ if struct == 'poll':
+ p = select.poll()
+ p.poll(0)
+
+ # All others will fail on allocation.
+ else:
+ getattr(select, struct)().close()
+ return True
+ except (OSError, AttributeError):
+ return False
+
+
+# Choose the best implementation, roughly:
+# kqueue == epoll > poll > select. Devpoll not supported. (See above)
+# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
+def DefaultSelector():
+ """ This function serves as a first call for DefaultSelector to
+ detect if the select module is being monkey-patched incorrectly
+ by eventlet or greenlet, and to preserve proper behavior. """
+ global _DEFAULT_SELECTOR
+ if _DEFAULT_SELECTOR is None:
+ if _can_allocate('kqueue'):
+ _DEFAULT_SELECTOR = KqueueSelector
+ elif _can_allocate('epoll'):
+ _DEFAULT_SELECTOR = EpollSelector
+ elif _can_allocate('poll'):
+ _DEFAULT_SELECTOR = PollSelector
+ elif hasattr(select, 'select'):
+ _DEFAULT_SELECTOR = SelectSelector
+ else: # Platform-specific: AppEngine
+ raise ValueError('Platform does not have a selector')
+ return _DEFAULT_SELECTOR()
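+
+
+# A minimal end-to-end sketch (assumes a platform with socket.socketpair,
+# i.e. Unix, or Python 3.5+ on Windows):
+#
+#     >>> import socket
+#     >>> a, b = socket.socketpair()
+#     >>> with DefaultSelector() as sel:
+#     ...     _ = sel.register(a, EVENT_READ)
+#     ...     _ = b.send(b'x')
+#     ...     [key.fileobj is a for key, events in sel.select(timeout=1.0)]
+#     [True]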
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
new file mode 100644
index 0000000..ece3ec3
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
@@ -0,0 +1,338 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import errno
+import warnings
+import hmac
+
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
+
+
+SSLContext = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {
+ 32: md5,
+ 40: sha1,
+ 64: sha256,
+}
+
+
+def _const_compare_digest_backport(a, b):
+ """
+ Compare two digests of equal length in constant time.
+
+ The digests must be of type str/bytes.
+ Returns True if the digests match, and False otherwise.
+ """
+ result = abs(len(a) - len(b))
+ for l, r in zip(bytearray(a), bytearray(b)):
+ result |= l ^ r
+ return result == 0
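+
+# A doctest-style sketch: equal-length byte strings compare in constant
+# time, returning a plain boolean.
+#
+#     >>> _const_compare_digest_backport(b'abc', b'abc')
+#     True
+#     >>> _const_compare_digest_backport(b'abc', b'abd')
+#     False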
+
+
+_const_compare_digest = getattr(hmac, 'compare_digest',
+ _const_compare_digest_backport)
+
+
+try: # Test for SSL features
+ import ssl
+ from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
+ from ssl import HAS_SNI # Has SNI?
+except ImportError:
+ pass
+
+
+try:
+ from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
+except ImportError:
+ OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+ OP_NO_COMPRESSION = 0x20000
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+# security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_CIPHERS = ':'.join([
+ 'ECDH+AESGCM',
+ 'ECDH+CHACHA20',
+ 'DH+AESGCM',
+ 'DH+CHACHA20',
+ 'ECDH+AES256',
+ 'DH+AES256',
+ 'ECDH+AES128',
+ 'DH+AES',
+ 'RSA+AESGCM',
+ 'RSA+AES',
+ '!aNULL',
+ '!eNULL',
+ '!MD5',
+])
+
+try:
+ from ssl import SSLContext # Modern SSL?
+except ImportError:
+ import sys
+
+ class SSLContext(object): # Platform-specific: Python 2 & 3.1
+ supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
+ (3, 2) <= sys.version_info)
+
+ def __init__(self, protocol_version):
+ self.protocol = protocol_version
+ # Use default values from a real SSLContext
+ self.check_hostname = False
+ self.verify_mode = ssl.CERT_NONE
+ self.ca_certs = None
+ self.options = 0
+ self.certfile = None
+ self.keyfile = None
+ self.ciphers = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def load_verify_locations(self, cafile=None, capath=None):
+ self.ca_certs = cafile
+
+ if capath is not None:
+ raise SSLError("CA directories not supported in older Pythons")
+
+ def set_ciphers(self, cipher_suite):
+ if not self.supports_set_ciphers:
+ raise TypeError(
+ 'Your version of Python does not support setting '
+ 'a custom cipher suite. Please upgrade to Python '
+ '2.7, 3.2, or later if you need this functionality.'
+ )
+ self.ciphers = cipher_suite
+
+ def wrap_socket(self, socket, server_hostname=None, server_side=False):
+ warnings.warn(
+ 'A true SSLContext object is not available. This prevents '
+ 'urllib3 from configuring SSL appropriately and may cause '
+ 'certain SSL connections to fail. You can upgrade to a newer '
+ 'version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ InsecurePlatformWarning
+ )
+ kwargs = {
+ 'keyfile': self.keyfile,
+ 'certfile': self.certfile,
+ 'ca_certs': self.ca_certs,
+ 'cert_reqs': self.verify_mode,
+ 'ssl_version': self.protocol,
+ 'server_side': server_side,
+ }
+ if self.supports_set_ciphers: # Platform-specific: Python 2.7+
+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+ else: # Platform-specific: Python 2.6
+ return wrap_socket(socket, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+ """
+ Checks if given fingerprint matches the supplied certificate.
+
+ :param cert:
+ Certificate as bytes object.
+ :param fingerprint:
+ Fingerprint as string of hexdigits, can be interspersed by colons.
+ """
+
+ fingerprint = fingerprint.replace(':', '').lower()
+ digest_length = len(fingerprint)
+ hashfunc = HASHFUNC_MAP.get(digest_length)
+ if not hashfunc:
+ raise SSLError(
+ 'Fingerprint of invalid length: {0}'.format(fingerprint))
+
+ # We need encode() here for py32; works on py2 and py3.
+ fingerprint_bytes = unhexlify(fingerprint.encode())
+
+ cert_digest = hashfunc(cert).digest()
+
+ if not _const_compare_digest(cert_digest, fingerprint_bytes):
+ raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
+ .format(fingerprint, hexlify(cert_digest)))
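+
+# A quick self-check sketch (an empty byte string stands in for a DER
+# certificate; its MD5 digest is well known). No exception means a match:
+#
+#     >>> assert_fingerprint(b'', 'd4:1d:8c:d9:8f:00:b2:04:e9:80:09:98:ec:f8:42:7e')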
+
+
+def resolve_cert_reqs(candidate):
+ """
+ Resolves the argument to a numeric constant, which can be passed to
+ the wrap_socket function/method from the ssl module.
+ Defaults to :data:`ssl.CERT_NONE`.
+ If given a string, it is assumed to be the name of the constant in the
+ :mod:`ssl` module or its abbreviation
+ (so you can specify `REQUIRED` instead of `CERT_REQUIRED`).
+ If it's neither `None` nor a string, we assume it is already the numeric
+ constant which can directly be passed to wrap_socket.
+ """
+ if candidate is None:
+ return CERT_NONE
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'CERT_' + candidate)
+ return res
+
+ return candidate
+
+
+def resolve_ssl_version(candidate):
+ """
+ Like :func:`resolve_cert_reqs`, but resolves the ssl protocol version.
+ """
+ if candidate is None:
+ return PROTOCOL_SSLv23
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'PROTOCOL_' + candidate)
+ return res
+
+ return candidate
+
+
+def create_urllib3_context(ssl_version=None, cert_reqs=None,
+ options=None, ciphers=None):
+ """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+ By default, this function does a lot of the same work that
+ ``ssl.create_default_context`` does on Python 3.4+. It:
+
+ - Disables SSLv2, SSLv3, and compression
+ - Sets a restricted set of server ciphers
+
+ If you wish to enable SSLv3, you can do::
+
+ from urllib3.util import ssl_
+ context = ssl_.create_urllib3_context()
+ context.options &= ~ssl_.OP_NO_SSLv3
+
+ You can do the same to enable compression (substituting ``COMPRESSION``
+ for ``SSLv3`` in the last line above).
+
+ :param ssl_version:
+ The desired protocol version to use. This will default to
+ PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+ the server and your installation of OpenSSL support.
+ :param cert_reqs:
+ Whether to require certificate verification. This defaults to
+ ``ssl.CERT_REQUIRED``.
+ :param options:
+ Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+ ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
+ :param ciphers:
+ Which cipher suites to allow the server to select.
+ :returns:
+ Constructed SSLContext object with specified options
+ :rtype: SSLContext
+ """
+ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
+
+ # Setting the default here, as we may have no ssl module on import
+ cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+ if options is None:
+ options = 0
+ # SSLv2 is easily broken and is considered harmful and dangerous
+ options |= OP_NO_SSLv2
+ # SSLv3 has several problems and is now dangerous
+ options |= OP_NO_SSLv3
+ # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+ # (issue #309)
+ options |= OP_NO_COMPRESSION
+
+ context.options |= options
+
+ if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
+ context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+ context.verify_mode = cert_reqs
+ if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
+ return context
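+
+# A short sketch (flag names come from the stdlib ssl module): the
+# returned context has SSLv2/SSLv3 and compression disabled by default.
+#
+#     >>> ctx = create_urllib3_context()
+#     >>> bool(ctx.options & OP_NO_SSLv3)
+#     True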
+
+
+def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
+ ca_certs=None, server_hostname=None,
+ ssl_version=None, ciphers=None, ssl_context=None,
+ ca_cert_dir=None):
+ """
+ All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+ the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+ :param server_hostname:
+ When SNI is supported, the expected hostname of the certificate
+ :param ssl_context:
+ A pre-made :class:`SSLContext` object. If none is provided, one will
+ be created using :func:`create_urllib3_context`.
+ :param ciphers:
+ A string of ciphers we wish the client to support. This is not
+ supported on Python 2.6 as the ssl module does not support it.
+ :param ca_cert_dir:
+ A directory containing CA certificates in multiple separate files, as
+ supported by OpenSSL's -CApath flag or the capath argument to
+ SSLContext.load_verify_locations().
+ """
+ context = ssl_context
+ if context is None:
+ # Note: This branch of code and all the variables in it are no longer
+ # used by urllib3 itself. We should consider deprecating and removing
+ # this code.
+ context = create_urllib3_context(ssl_version, cert_reqs,
+ ciphers=ciphers)
+
+ if ca_certs or ca_cert_dir:
+ try:
+ context.load_verify_locations(ca_certs, ca_cert_dir)
+ except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
+ raise SSLError(e)
+ # Py33 raises FileNotFoundError which subclasses OSError
+ # These are not equivalent unless we check the errno attribute
+ except OSError as e: # Platform-specific: Python 3.3 and beyond
+ if e.errno == errno.ENOENT:
+ raise SSLError(e)
+ raise
+ elif getattr(context, 'load_default_certs', None) is not None:
+ # try to load OS default certs; works well on Windows (requires Python 3.4+)
+ context.load_default_certs()
+
+ if certfile:
+ context.load_cert_chain(certfile, keyfile)
+ if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
+ return context.wrap_socket(sock, server_hostname=server_hostname)
+
+ warnings.warn(
+ 'An HTTPS request has been made, but the SNI (Subject Name '
+ 'Indication) extension to TLS is not available on this platform. '
+ 'This may cause the server to present an incorrect TLS '
+ 'certificate, which can cause validation failures. You can upgrade to '
+ 'a newer version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ SNIMissingWarning
+ )
+ return context.wrap_socket(sock)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
new file mode 100644
index 0000000..4041cf9
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
@@ -0,0 +1,243 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+import time
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+ """ Timeout configuration.
+
+ Timeouts can be defined as a default for a pool::
+
+ timeout = Timeout(connect=2.0, read=7.0)
+ http = PoolManager(timeout=timeout)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+ Timeouts can be disabled by setting all the parameters to ``None``::
+
+ no_timeout = Timeout(connect=None, read=None)
+ response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+ :param total:
+ This combines the connect and read timeouts into one; the read timeout
+ will be set to the time leftover from the connect attempt. In the
+ event that both a connect timeout and a total are specified, or a read
+ timeout and a total are specified, the shorter timeout will be applied.
+
+ Defaults to None.
+
+ :type total: integer, float, or None
+
+ :param connect:
+ The maximum amount of time to wait for a connection attempt to a server
+ to succeed. Omitting the parameter will default the connect timeout to
+ the system default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout for connection attempts.
+
+ :type connect: integer, float, or None
+
+ :param read:
+ The maximum amount of time to wait between consecutive
+ read operations for a response from the server. Omitting
+ the parameter will default the read timeout to the system
+ default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout.
+
+ :type read: integer, float, or None
+
+ .. note::
+
+ Many factors can affect the total amount of time for urllib3 to return
+ an HTTP response.
+
+ For example, Python's DNS resolver does not obey the timeout specified
+ on the socket. Other factors that can affect total request time include
+ high CPU load, high swap, the program running at a low priority level,
+ or other behaviors.
+
+ In addition, the read and total timeouts only measure the time between
+ read operations on the socket connecting the client and the server,
+ not the total amount of time for the request to return a complete
+ response. For most requests, the timeout is raised because the server
+ has not sent the first byte in the specified time. This is not always
+ the case; if a server streams one byte every fifteen seconds, a timeout
+ of 20 seconds will not trigger, even though the request will take
+ several minutes to complete.
+
+ If your goal is to cut off any request after a set amount of wall clock
+ time, consider having a second "watcher" thread to cut off a slow
+ request.
+ """
+
+ #: A sentinel object representing the default timeout value
+ DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+ def __init__(self, total=None, connect=_Default, read=_Default):
+ self._connect = self._validate_timeout(connect, 'connect')
+ self._read = self._validate_timeout(read, 'read')
+ self.total = self._validate_timeout(total, 'total')
+ self._start_connect = None
+
+ def __str__(self):
+ return '%s(connect=%r, read=%r, total=%r)' % (
+ type(self).__name__, self._connect, self._read, self.total)
+
+ @classmethod
+ def _validate_timeout(cls, value, name):
+ """ Check that a timeout attribute is valid.
+
+ :param value: The timeout value to validate
+ :param name: The name of the timeout attribute to validate. This is
+ used in error messages.
+ :return: The validated and cast version of the given value.
+ :raises ValueError: If it is a numeric value less than or equal to
+ zero, or the type is not an integer, float, or None.
+ """
+ if value is _Default:
+ return cls.DEFAULT_TIMEOUT
+
+ if value is None or value is cls.DEFAULT_TIMEOUT:
+ return value
+
+ if isinstance(value, bool):
+ raise ValueError("Timeout cannot be a boolean value. It must "
+ "be an int, float or None.")
+ try:
+ float(value)
+ except (TypeError, ValueError):
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ try:
+ if value <= 0:
+ raise ValueError("Attempted to set %s timeout to %s, but the "
+ "timeout cannot be set to a value less "
+ "than or equal to 0." % (name, value))
+ except TypeError: # Python 3
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ return value
+
+ @classmethod
+ def from_float(cls, timeout):
+ """ Create a new Timeout from a legacy timeout value.
+
+ The timeout value used by httplib.py sets the same timeout on the
+ connect() and recv() socket requests. This creates a :class:`Timeout`
+ object that sets the individual timeouts to the ``timeout`` value
+ passed to this function.
+
+ :param timeout: The legacy timeout value.
+ :type timeout: integer, float, sentinel default object, or None
+ :return: Timeout object
+ :rtype: :class:`Timeout`
+ """
+ return Timeout(read=timeout, connect=timeout)
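+
+ # A one-line sketch of the legacy conversion:
+ #
+ #     >>> print(Timeout.from_float(3.0))
+ #     Timeout(connect=3.0, read=3.0, total=None)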
+
+ def clone(self):
+ """ Create a copy of the timeout object
+
+ Timeout properties are stored per-pool but each request needs a fresh
+ Timeout object to ensure each one has its own start/stop configured.
+
+ :return: a copy of the timeout object
+ :rtype: :class:`Timeout`
+ """
+ # We can't use copy.deepcopy because that will also create a new object
+ # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+ # detect the user default.
+ return Timeout(connect=self._connect, read=self._read,
+ total=self.total)
+
+ def start_connect(self):
+ """ Start the timeout clock, used during a connect() attempt
+
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to start a timer that has been started already.
+ """
+ if self._start_connect is not None:
+ raise TimeoutStateError("Timeout timer has already been started.")
+ self._start_connect = current_time()
+ return self._start_connect
+
+ def get_connect_duration(self):
+ """ Gets the time elapsed since the call to :meth:`start_connect`.
+
+ :return: Elapsed time.
+ :rtype: float
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to get duration for a timer that hasn't been started.
+ """
+ if self._start_connect is None:
+ raise TimeoutStateError("Can't get connect duration for timer "
+ "that has not started.")
+ return current_time() - self._start_connect
+
+ @property
+ def connect_timeout(self):
+ """ Get the value to use when setting a connection timeout.
+
+ This will be a positive float or integer, the value None
+ (never timeout), or the default system timeout.
+
+ :return: Connect timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ """
+ if self.total is None:
+ return self._connect
+
+ if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+ return self.total
+
+ return min(self._connect, self.total)
+
+ @property
+ def read_timeout(self):
+ """ Get the value for the read timeout.
+
+ This assumes some time has elapsed in the connection timeout and
+ computes the read timeout appropriately.
+
+ If self.total is set, the read timeout is dependent on the amount of
+ time taken by the connect timeout. If the connection time has not been
+ established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+ raised.
+
+ :return: Value to use for the read timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+ has not yet been called on this object.
+ """
+ if (self.total is not None and
+ self.total is not self.DEFAULT_TIMEOUT and
+ self._read is not None and
+ self._read is not self.DEFAULT_TIMEOUT):
+ # In case the connect timeout has not yet been established.
+ if self._start_connect is None:
+ return self._read
+ return max(0, min(self.total - self.get_connect_duration(),
+ self._read))
+ elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+ return max(0, self.total - self.get_connect_duration())
+ else:
+ return self._read
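+
+
+# A small sketch of how ``total`` caps ``connect`` (values illustrative):
+# the effective connect timeout is the smaller of the two.
+#
+#     >>> Timeout(total=5.0, connect=2.0).connect_timeout
+#     2.0
+#     >>> Timeout(total=1.0, connect=2.0).connect_timeout
+#     1.0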
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
new file mode 100644
index 0000000..99fd653
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
@@ -0,0 +1,231 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+
+
+url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ('http', 'https', None)
+
+
+class Url(namedtuple('Url', url_attrs)):
+ """
+ Datastructure for representing an HTTP URL. Used as a return value for
+ :func:`parse_url`. Both the scheme and host are normalized as they are
+ both case-insensitive according to RFC 3986.
+ """
+ __slots__ = ()
+
+ def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
+ query=None, fragment=None):
+ if path and not path.startswith('/'):
+ path = '/' + path
+ if scheme:
+ scheme = scheme.lower()
+ if host and scheme in NORMALIZABLE_SCHEMES:
+ host = host.lower()
+ return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
+ query, fragment)
+
+ @property
+ def hostname(self):
+ """For backwards-compatibility with urlparse. We're nice like that."""
+ return self.host
+
+ @property
+ def request_uri(self):
+ """Absolute path including the query string."""
+ uri = self.path or '/'
+
+ if self.query is not None:
+ uri += '?' + self.query
+
+ return uri
+
+ @property
+ def netloc(self):
+ """Network location including host and port"""
+ if self.port:
+ return '%s:%d' % (self.host, self.port)
+ return self.host
+
+ @property
+ def url(self):
+ """
+ Convert self into a url
+
+ This function should more or less round-trip with :func:`.parse_url`. The
+ returned url may not be exactly the same as the url inputted to
+ :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+ with a blank port will have : removed).
+
+ Example: ::
+
+ >>> U = parse_url('http://google.com/mail/')
+ >>> U.url
+ 'http://google.com/mail/'
+ >>> Url('http', 'username:password', 'host.com', 80,
+ ... '/path', 'query', 'fragment').url
+ 'http://username:password@host.com:80/path?query#fragment'
+ """
+ scheme, auth, host, port, path, query, fragment = self
+ url = ''
+
+ # We use "is not None" we want things to happen with empty strings (or 0 port)
+ if scheme is not None:
+ url += scheme + '://'
+ if auth is not None:
+ url += auth + '@'
+ if host is not None:
+ url += host
+ if port is not None:
+ url += ':' + str(port)
+ if path is not None:
+ url += path
+ if query is not None:
+ url += '?' + query
+ if fragment is not None:
+ url += '#' + fragment
+
+ return url
+
+ def __str__(self):
+ return self.url
+
+
+def split_first(s, delims):
+ """
+ Given a string and an iterable of delimiters, split on the first found
+ delimiter. Return two split parts and the matched delimiter.
+
+ If not found, then the first part is the full input string.
+
+ Example::
+
+ >>> split_first('foo/bar?baz', '?/=')
+ ('foo', 'bar?baz', '/')
+ >>> split_first('foo/bar?baz', '123')
+ ('foo/bar?baz', '', None)
+
+ Scales linearly with the number of delims; not ideal for a large number of delims.
+ """
+ min_idx = None
+ min_delim = None
+ for d in delims:
+ idx = s.find(d)
+ if idx < 0:
+ continue
+
+ if min_idx is None or idx < min_idx:
+ min_idx = idx
+ min_delim = d
+
+ if min_idx is None or min_idx < 0:
+ return s, '', None
+
+ return s[:min_idx], s[min_idx + 1:], min_delim
+
+
+def parse_url(url):
+ """
+ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+ performed to parse incomplete urls. Fields not provided will be None.
+
+ Partly backwards-compatible with :mod:`urlparse`.
+
+ Example::
+
+ >>> parse_url('http://google.com/mail/')
+ Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+ >>> parse_url('google.com:80')
+ Url(scheme=None, host='google.com', port=80, path=None, ...)
+ >>> parse_url('/foo?bar')
+ Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+ """
+
+ # While this code has overlap with stdlib's urlparse, it is much
+ # simplified for our needs and less annoying.
+ # Additionally, this implementation does silly things to be optimal
+ # on CPython.
+
+ if not url:
+ # Empty
+ return Url()
+
+ scheme = None
+ auth = None
+ host = None
+ port = None
+ path = None
+ fragment = None
+ query = None
+
+ # Scheme
+ if '://' in url:
+ scheme, url = url.split('://', 1)
+
+ # Find the earliest Authority Terminator
+ # (http://tools.ietf.org/html/rfc3986#section-3.2)
+ url, path_, delim = split_first(url, ['/', '?', '#'])
+
+ if delim:
+ # Reassemble the path
+ path = delim + path_
+
+ # Auth
+ if '@' in url:
+ # Last '@' denotes end of auth part
+ auth, url = url.rsplit('@', 1)
+
+ # IPv6
+ if url and url[0] == '[':
+ host, url = url.split(']', 1)
+ host += ']'
+
+ # Port
+ if ':' in url:
+ _host, port = url.split(':', 1)
+
+ if not host:
+ host = _host
+
+ if port:
+ # If given, ports must be integers. No whitespace, no plus or
+ # minus prefixes, no non-integer digits such as ^2 (superscript).
+ if not port.isdigit():
+ raise LocationParseError(url)
+ try:
+ port = int(port)
+ except ValueError:
+ raise LocationParseError(url)
+ else:
+ # Blank ports are cool, too. (rfc3986#section-3.2.3)
+ port = None
+
+ elif not host and url:
+ host = url
+
+ if not path:
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+ # Fragment
+ if '#' in path:
+ path, fragment = path.split('#', 1)
+
+ # Query
+ if '?' in path:
+ path, query = path.split('?', 1)
+
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+
+def get_host(url):
+ """
+ Deprecated. Use :func:`parse_url` instead.
+ """
+ p = parse_url(url)
+ return p.scheme or 'http', p.hostname, p.port
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
new file mode 100644
index 0000000..21e7297
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+from .selectors import (
+ HAS_SELECT,
+ DefaultSelector,
+ EVENT_READ,
+ EVENT_WRITE
+)
+
+
+def _wait_for_io_events(socks, events, timeout=None):
+ """ Waits for IO events to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be interacted with immediately. """
+ if not HAS_SELECT:
+ raise ValueError('Platform does not have a selector')
+ if not isinstance(socks, list):
+ # Probably just a single socket.
+ if hasattr(socks, "fileno"):
+ socks = [socks]
+ # Otherwise it might be a non-list iterable.
+ else:
+ socks = list(socks)
+ with DefaultSelector() as selector:
+ for sock in socks:
+ selector.register(sock, events)
+ return [key[0].fileobj for key in
+ selector.select(timeout) if key[1] & events]
+
+
+def wait_for_read(socks, timeout=None):
+ """ Waits for reading to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be read from immediately. """
+ return _wait_for_io_events(socks, EVENT_READ, timeout)
+
+
+def wait_for_write(socks, timeout=None):
+ """ Waits for writing to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be written to immediately. """
+ return _wait_for_io_events(socks, EVENT_WRITE, timeout)
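+
+
+# Illustrative usage sketch (an assumption for documentation purposes, not
+# part of upstream urllib3): block for up to one second until a connected
+# socket `sock` is readable, then read from it.
+#
+#   from urllib3.util.wait import wait_for_read
+#
+#   if wait_for_read(sock, timeout=1.0):
+#       data = sock.recv(4096)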
diff --git a/collectors/python.d.plugin/rabbitmq/Makefile.inc b/collectors/python.d.plugin/rabbitmq/Makefile.inc
new file mode 100644
index 0000000..7e67ef5
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rabbitmq/rabbitmq.chart.py
+dist_pythonconfig_DATA += rabbitmq/rabbitmq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rabbitmq/README.md rabbitmq/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
new file mode 100644
index 0000000..4ac6060
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -0,0 +1,58 @@
+# rabbitmq
+
+This module monitors RabbitMQ performance and health metrics.
+
+The following charts are drawn:
+
+1. **Queued Messages**
+ * ready
+ * unacknowledged
+
+2. **Message Rates**
+ * ack
+ * redelivered
+ * deliver
+ * publish
+
+3. **Global Counts**
+ * channels
+ * consumers
+ * connections
+ * queues
+ * exchanges
+
+4. **File Descriptors**
+ * used descriptors
+
+5. **Socket Descriptors**
+ * used descriptors
+
+6. **Erlang processes**
+ * used processes
+
+7. **Erlang run queue**
+ * Erlang run queue
+
+8. **Memory**
+ * free memory in megabytes
+
+9. **Disk Space**
+ * free disk space in gigabytes
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 15672
+ user : 'guest'
+ pass : 'guest'
+```
+
+When no configuration file is found, the module tries to connect to `localhost:15672`.
+
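+A job for a management interface served over HTTPS might look like the
+following (the `scheme`, `host` and `port` options are documented in
+`rabbitmq.conf`; the address is a placeholder):
+
+```yaml
+remote:
+  name   : 'remote'
+  scheme : 'https'
+  host   : '203.0.113.10'
+  port   : 15672
+  user   : 'guest'
+  pass   : 'guest'
+```
+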
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
new file mode 100644
index 0000000..a8f7259
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+# Description: rabbitmq netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+API_NODE = 'api/nodes'
+API_OVERVIEW = 'api/overview'
+
+NODE_STATS = [
+ 'fd_used',
+ 'mem_used',
+ 'sockets_used',
+ 'proc_used',
+ 'disk_free',
+ 'run_queue'
+]
+
+OVERVIEW_STATS = [
+ 'object_totals.channels',
+ 'object_totals.consumers',
+ 'object_totals.connections',
+ 'object_totals.queues',
+ 'object_totals.exchanges',
+ 'queue_totals.messages_ready',
+ 'queue_totals.messages_unacknowledged',
+ 'message_stats.ack',
+ 'message_stats.redeliver',
+ 'message_stats.deliver',
+ 'message_stats.publish'
+]
+
+ORDER = [
+ 'queued_messages',
+ 'message_rates',
+ 'global_counts',
+ 'file_descriptors',
+ 'socket_descriptors',
+ 'erlang_processes',
+ 'erlang_run_queue',
+ 'memory',
+ 'disk_space'
+]
+
+CHARTS = {
+ 'file_descriptors': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'overview', 'rabbitmq.file_descriptors', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute']
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory', 'MiB', 'overview', 'rabbitmq.memory', 'area'],
+ 'lines': [
+ ['mem_used', 'used', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'disk_space': {
+ 'options': [None, 'Disk Space', 'GiB', 'overview', 'rabbitmq.disk_space', 'area'],
+ 'lines': [
+ ['disk_free', 'free', 'absolute', 1, 1 << 30]
+ ]
+ },
+ 'socket_descriptors': {
+ 'options': [None, 'Socket Descriptors', 'descriptors', 'overview', 'rabbitmq.sockets', 'line'],
+ 'lines': [
+ ['sockets_used', 'used', 'absolute']
+ ]
+ },
+ 'erlang_processes': {
+ 'options': [None, 'Erlang Processes', 'processes', 'overview', 'rabbitmq.processes', 'line'],
+ 'lines': [
+ ['proc_used', 'used', 'absolute']
+ ]
+ },
+ 'erlang_run_queue': {
+ 'options': [None, 'Erlang Run Queue', 'processes', 'overview', 'rabbitmq.erlang_run_queue', 'line'],
+ 'lines': [
+ ['run_queue', 'length', 'absolute']
+ ]
+ },
+ 'global_counts': {
+ 'options': [None, 'Global Counts', 'counts', 'overview', 'rabbitmq.global_counts', 'line'],
+ 'lines': [
+ ['object_totals_channels', 'channels', 'absolute'],
+ ['object_totals_consumers', 'consumers', 'absolute'],
+ ['object_totals_connections', 'connections', 'absolute'],
+ ['object_totals_queues', 'queues', 'absolute'],
+ ['object_totals_exchanges', 'exchanges', 'absolute']
+ ]
+ },
+ 'queued_messages': {
+ 'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'],
+ 'lines': [
+ ['queue_totals_messages_ready', 'ready', 'absolute'],
+ ['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute']
+ ]
+ },
+ 'message_rates': {
+ 'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'line'],
+ 'lines': [
+ ['message_stats_ack', 'ack', 'incremental'],
+ ['message_stats_redeliver', 'redeliver', 'incremental'],
+ ['message_stats_deliver', 'deliver', 'incremental'],
+ ['message_stats_publish', 'publish', 'incremental']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = '{0}://{1}:{2}'.format(
+ configuration.get('scheme', 'http'),
+ configuration.get('host', '127.0.0.1'),
+ configuration.get('port', 15672),
+ )
+ self.node_name = str()
+
+ def _get_data(self):
+ data = dict()
+
+ stats = self.get_overview_stats()
+
+ if not stats:
+ return None
+
+ data.update(stats)
+
+ stats = self.get_nodes_stats()
+
+ if not stats:
+ return None
+
+ data.update(stats)
+
+ return data or None
+
+ def get_overview_stats(self):
+ url = '{0}/{1}'.format(self.url, API_OVERVIEW)
+
+ raw = self._get_raw_data(url)
+
+ if not raw:
+ return None
+
+ data = loads(raw)
+
+ self.node_name = data['node']
+
+ return fetch_data(raw_data=data, metrics=OVERVIEW_STATS)
+
+ def get_nodes_stats(self):
+ url = '{0}/{1}/{2}'.format(self.url, API_NODE, self.node_name)
+
+ raw = self._get_raw_data(url)
+
+ if not raw:
+ return None
+
+ data = loads(raw)
+
+ return fetch_data(raw_data=data, metrics=NODE_STATS)
+
+
+def fetch_data(raw_data, metrics):
+ data = dict()
+
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+
+ return data
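+
+
+# Illustrative example (not executed at runtime): given
+# raw_data = {'object_totals': {'channels': 3}} and
+# metrics = ['object_totals.channels'], fetch_data returns
+# {'object_totals_channels': 3}; metrics missing from the payload
+# are skipped rather than reported.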
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
new file mode 100644
index 0000000..ae0dbdb
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
@@ -0,0 +1,80 @@
+# netdata python.d.plugin configuration for rabbitmq
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the rabbitmq plugin also supports the following:
+#
+# host: 'ipaddress' # Server IP address or hostname. Default: 127.0.0.1
+# port: 'port' # RabbitMQ management port. Default: 15672
+# scheme: 'scheme' # http or https. Default: http
+#
+# If the URL is password-protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ host: '127.0.0.1'
+ user: 'guest'
+ pass: 'guest'
diff --git a/collectors/python.d.plugin/redis/Makefile.inc b/collectors/python.d.plugin/redis/Makefile.inc
new file mode 100644
index 0000000..6aab089
--- /dev/null
+++ b/collectors/python.d.plugin/redis/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += redis/redis.chart.py
+dist_pythonconfig_DATA += redis/redis.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += redis/README.md redis/Makefile.inc
+
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
new file mode 100644
index 0000000..0bea037
--- /dev/null
+++ b/collectors/python.d.plugin/redis/README.md
@@ -0,0 +1,44 @@
+# redis
+
+This module collects INFO data from a Redis instance.
+
+The following charts are drawn:
+
+1. **Operations** per second
+ * operations
+
+2. **Hit rate** in percent
+ * rate
+
+3. **Memory utilization** in kilobytes
+ * total
+ * lua
+
+4. **Database keys**
+  * lines are created dynamically based on how many databases there are
+
+5. **Clients**
+ * connected
+ * blocked
+
+6. **Slaves**
+ * connected
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 6379
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:6379`.
+
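+A password-protected instance can be monitored by adding the `pass` option
+documented in `redis.conf` (the password here is a placeholder):
+
+```yaml
+secured:
+  name : 'local'
+  host : 'localhost'
+  port : 6379
+  pass : 'example_password'
+```
+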
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fredis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/redis/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
new file mode 100644
index 0000000..9dbb2c1
--- /dev/null
+++ b/collectors/python.d.plugin/redis/redis.chart.py
@@ -0,0 +1,258 @@
+# -*- coding: utf-8 -*-
+# Description: redis netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+REDIS_ORDER = [
+ 'operations',
+ 'hit_rate',
+ 'memory',
+ 'keys_redis',
+ 'eviction',
+ 'net',
+ 'connections',
+ 'clients',
+ 'slaves',
+ 'persistence',
+ 'bgsave_now',
+ 'bgsave_health',
+ 'uptime',
+]
+
+PIKA_ORDER = [
+ 'operations',
+ 'hit_rate',
+ 'memory',
+ 'keys_pika',
+ 'connections',
+ 'clients',
+ 'slaves',
+ 'uptime',
+]
+
+
+CHARTS = {
+ 'operations': {
+ 'options': [None, 'Operations', 'operations/s', 'operations', 'redis.operations', 'line'],
+ 'lines': [
+ ['total_commands_processed', 'commands', 'incremental'],
+ ['instantaneous_ops_per_sec', 'operations', 'absolute']
+ ]
+ },
+ 'hit_rate': {
+ 'options': [None, 'Hit rate', 'percentage', 'hits', 'redis.hit_rate', 'line'],
+ 'lines': [
+ ['hit_rate', 'rate', 'absolute']
+ ]
+ },
+ 'memory': {
+ 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'line'],
+ 'lines': [
+ ['used_memory', 'total', 'absolute', 1, 1024],
+ ['used_memory_lua', 'lua', 'absolute', 1, 1024]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
+ 'lines': [
+ ['total_net_input_bytes', 'in', 'incremental', 8, 1000],
+ ['total_net_output_bytes', 'out', 'incremental', -8, 1000]
+ ]
+ },
+ 'keys_redis': {
+ 'options': [None, 'Keys per Database', 'keys', 'keys', 'redis.keys', 'line'],
+ 'lines': []
+ },
+ 'keys_pika': {
+ 'options': [None, 'Keys', 'keys', 'keys', 'redis.keys', 'line'],
+ 'lines': [
+ ['kv_keys', 'kv', 'absolute'],
+ ['hash_keys', 'hash', 'absolute'],
+ ['list_keys', 'list', 'absolute'],
+ ['zset_keys', 'zset', 'absolute'],
+ ['set_keys', 'set', 'absolute']
+ ]
+ },
+ 'eviction': {
+ 'options': [None, 'Evicted Keys', 'keys', 'keys', 'redis.eviction', 'line'],
+ 'lines': [
+ ['evicted_keys', 'evicted', 'absolute']
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'redis.connections', 'line'],
+ 'lines': [
+ ['total_connections_received', 'received', 'incremental', 1],
+ ['rejected_connections', 'rejected', 'incremental', -1]
+ ]
+ },
+ 'clients': {
+ 'options': [None, 'Clients', 'clients', 'connections', 'redis.clients', 'line'],
+ 'lines': [
+ ['connected_clients', 'connected', 'absolute', 1],
+ ['blocked_clients', 'blocked', 'absolute', -1]
+ ]
+ },
+ 'slaves': {
+ 'options': [None, 'Slaves', 'slaves', 'replication', 'redis.slaves', 'line'],
+ 'lines': [
+ ['connected_slaves', 'connected', 'absolute']
+ ]
+ },
+ 'persistence': {
+ 'options': [None, 'Persistence Changes Since Last Save', 'changes', 'persistence',
+ 'redis.rdb_changes', 'line'],
+ 'lines': [
+ ['rdb_changes_since_last_save', 'changes', 'absolute']
+ ]
+ },
+ 'bgsave_now': {
+ 'options': [None, 'Duration of the RDB Save Operation', 'seconds', 'persistence',
+ 'redis.bgsave_now', 'absolute'],
+ 'lines': [
+ ['rdb_bgsave_in_progress', 'rdb save', 'absolute']
+ ]
+ },
+ 'bgsave_health': {
+ 'options': [None, 'Status of the Last RDB Save Operation', 'status', 'persistence',
+ 'redis.bgsave_health', 'line'],
+ 'lines': [
+ ['rdb_last_bgsave_status', 'rdb save', 'absolute']
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'redis.uptime', 'line'],
+ 'lines': [
+ ['uptime_in_seconds', 'uptime', 'absolute']
+ ]
+ }
+}
+
+
+def copy_chart(name):
+ return {name: deepcopy(CHARTS[name])}
+
+
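+# Matches "name:value" lines in the INFO response; the optional "keys="
+# group handles keyspace lines, e.g. "db0:keys=10,expires=0,avg_ttl=0"
+# yields the pair ('db0', '10').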
+RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self._keep_alive = True
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 6379)
+ self.unix_socket = self.configuration.get('socket')
+ p = self.configuration.get('pass')
+ self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
+ self.request = 'INFO\r\n'.encode()
+ self.bgsave_time = 0
+
+ def do_auth(self):
+ resp = self._get_raw_data(request=self.auth_request)
+ if not resp:
+ return False
+ if resp.strip() != '+OK':
+ self.error('invalid password')
+ return False
+ return True
+
+ def get_raw_and_parse(self):
+ if self.auth_request and not self.do_auth():
+ return None
+
+ resp = self._get_raw_data()
+
+ if not resp:
+ return None
+
+ parsed = RE.findall(resp)
+
+ if not parsed:
+ self.error('response is invalid/empty')
+ return None
+
+ return dict((k.replace(' ', '_'), v) for k, v in parsed)
+
+ def get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ data = self.get_raw_and_parse()
+
+ if not data:
+ return None
+
+ try:
+ data['hit_rate'] = (
+ (int(data['keyspace_hits']) * 100) / (int(data['keyspace_hits']) + int(data['keyspace_misses']))
+ )
+ except (KeyError, ZeroDivisionError):
+ data['hit_rate'] = 0
+
+ if data.get('redis_version') and data.get('rdb_bgsave_in_progress'):
+ self.get_data_redis_specific(data)
+
+ return data
+
+ def get_data_redis_specific(self, data):
+ if data['rdb_bgsave_in_progress'] != '0':
+ self.bgsave_time += self.update_every
+ else:
+ self.bgsave_time = 0
+
+ data['rdb_last_bgsave_status'] = 0 if data['rdb_last_bgsave_status'] == 'ok' else 1
+ data['rdb_bgsave_in_progress'] = self.bgsave_time
+
+ def check(self):
+ """
+ Parse configuration, check if redis is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ data = self.get_raw_and_parse()
+
+ if not data:
+ return False
+
+ self.order = PIKA_ORDER if data.get('pika_version') else REDIS_ORDER
+
+ for n in self.order:
+ self.definitions.update(copy_chart(n))
+
+ if data.get('redis_version'):
+ for k in data:
+ if k.startswith('db'):
+ self.definitions['keys_redis']['lines'].append([k, None, 'absolute'])
+
+ return True
+
+ def _check_raw_data(self, data):
+ """
+        Check if all data has been gathered from the socket.
+        Parse the first line containing the message length and check it against the received message.
+ :param data: str
+ :return: boolean
+ """
+ length = len(data)
+ supposed = data.split('\n')[0][1:-1]
+        offset = len(supposed) + 4 # 1 dollar sign, 1 new line character + 1 ending sequence '\r\n'
+ if not supposed.isdigit():
+ return True
+ supposed = int(supposed)
+
+ if length - offset >= supposed:
+ self.debug('received full response from redis')
+ return True
+
+ self.debug('waiting more data from redis')
+ return False
diff --git a/collectors/python.d.plugin/redis/redis.conf b/collectors/python.d.plugin/redis/redis.conf
new file mode 100644
index 0000000..b456d75
--- /dev/null
+++ b/collectors/python.d.plugin/redis/redis.conf
@@ -0,0 +1,110 @@
+# netdata python.d.plugin configuration for redis
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, redis also supports the following:
+#
+# socket: 'path/to/redis.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# and
+# pass: 'password' # the redis password to use for AUTH command
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+socket1:
+ name : 'local'
+ socket : '/tmp/redis.sock'
+ # pass : ''
+
+socket2:
+ name : 'local'
+ socket : '/var/run/redis/redis.sock'
+ # pass : ''
+
+socket3:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+ # pass : ''
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 6379
+ # pass : ''
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 6379
+ # pass : ''
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 6379
+ # pass : ''
+
diff --git a/collectors/python.d.plugin/rethinkdbs/Makefile.inc b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
new file mode 100644
index 0000000..dec6044
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rethinkdbs/rethinkdbs.chart.py
+dist_pythonconfig_DATA += rethinkdbs/rethinkdbs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rethinkdbs/README.md rethinkdbs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
new file mode 100644
index 0000000..183c7f7
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -0,0 +1,36 @@
+# rethinkdbs
+
+This module monitors RethinkDB health metrics.
+
+**Requirements:**
+* `rethinkdb` python module
+
+The following charts are drawn:
+
+1. **Connected Servers**
+ * connected
+ * missing
+
+2. **Active Clients**
+ * active
+
+3. **Queries** per second
+ * queries
+
+4. **Documents** per second
+ * documents
+
+### configuration
+
+```yaml
+localhost:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 28015
+ user : "user"
+ password : "pass"
+```
+
+When no configuration file is found, the module tries to connect to `127.0.0.1:28015`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frethinkdbs%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
new file mode 100644
index 0000000..da2f26f
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -0,0 +1,233 @@
+# -*- coding: utf-8 -*-
+# Description: rethinkdb netdata python.d module
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import rethinkdb as rdb
+ HAS_RETHINKDB = True
+except ImportError:
+ HAS_RETHINKDB = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+ 'cluster_connected_servers',
+ 'cluster_clients_active',
+ 'cluster_queries',
+ 'cluster_documents',
+]
+
+
+def cluster_charts():
+ return {
+ 'cluster_connected_servers': {
+ 'options': [None, 'Connected Servers', 'servers', 'cluster', 'rethinkdb.cluster_connected_servers',
+ 'stacked'],
+ 'lines': [
+ ['cluster_servers_connected', 'connected'],
+ ['cluster_servers_missing', 'missing'],
+ ]
+ },
+ 'cluster_clients_active': {
+ 'options': [None, 'Active Clients', 'clients', 'cluster', 'rethinkdb.cluster_clients_active',
+ 'line'],
+ 'lines': [
+ ['cluster_clients_active', 'active'],
+ ]
+ },
+ 'cluster_queries': {
+ 'options': [None, 'Queries', 'queries/s', 'cluster', 'rethinkdb.cluster_queries', 'line'],
+ 'lines': [
+ ['cluster_queries_per_sec', 'queries'],
+ ]
+ },
+ 'cluster_documents': {
+ 'options': [None, 'Documents', 'documents/s', 'cluster', 'rethinkdb.cluster_documents', 'line'],
+ 'lines': [
+ ['cluster_read_docs_per_sec', 'reads'],
+ ['cluster_written_docs_per_sec', 'writes'],
+ ]
+ },
+ }
+
+
+def server_charts(n):
+ o = [
+ '{0}_client_connections'.format(n),
+ '{0}_clients_active'.format(n),
+ '{0}_queries'.format(n),
+ '{0}_documents'.format(n),
+ ]
+ f = 'server {0}'.format(n)
+
+ c = {
+ o[0]: {
+ 'options': [None, 'Client Connections', 'connections', f, 'rethinkdb.client_connections', 'line'],
+ 'lines': [
+ ['{0}_client_connections'.format(n), 'connections'],
+ ]
+ },
+ o[1]: {
+ 'options': [None, 'Active Clients', 'clients', f, 'rethinkdb.clients_active', 'line'],
+ 'lines': [
+ ['{0}_clients_active'.format(n), 'active'],
+ ]
+ },
+ o[2]: {
+ 'options': [None, 'Queries', 'queries/s', f, 'rethinkdb.queries', 'line'],
+ 'lines': [
+ ['{0}_queries_total'.format(n), 'queries', 'incremental'],
+ ]
+ },
+ o[3]: {
+ 'options': [None, 'Documents', 'documents/s', f, 'rethinkdb.documents', 'line'],
+ 'lines': [
+ ['{0}_read_docs_total'.format(n), 'reads', 'incremental'],
+ ['{0}_written_docs_total'.format(n), 'writes', 'incremental'],
+ ]
+ },
+ }
+
+ return o, c
+
+
+class Cluster:
+ def __init__(self, raw):
+ self.raw = raw
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ return {
+ 'cluster_clients_active': qe['clients_active'],
+ 'cluster_queries_per_sec': qe['queries_per_sec'],
+ 'cluster_read_docs_per_sec': qe['read_docs_per_sec'],
+ 'cluster_written_docs_per_sec': qe['written_docs_per_sec'],
+ 'cluster_servers_connected': 0,
+ 'cluster_servers_missing': 0,
+ }
+
+
+class Server:
+ def __init__(self, raw):
+ self.name = raw['server']
+ self.raw = raw
+
+ def error(self):
+ return self.raw.get('error')
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ d = {
+ 'client_connections': qe['client_connections'],
+ 'clients_active': qe['clients_active'],
+ 'queries_total': qe['queries_total'],
+ 'read_docs_total': qe['read_docs_total'],
+ 'written_docs_total': qe['written_docs_total'],
+ }
+
+ return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = cluster_charts()
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 28015)
+ self.user = self.configuration.get('user', 'admin')
+ self.password = self.configuration.get('password')
+ self.timeout = self.configuration.get('timeout', 2)
+ self.conn = None
+ self.alive = True
+
+ def check(self):
+ if not HAS_RETHINKDB:
+ self.error('"rethinkdb" module is needed to use rethinkdbs.py')
+ return False
+
+ if not self.connect():
+            return False
+
+ stats = self.get_stats()
+
+ if not stats:
+            return False
+
+ for v in stats[1:]:
+ if get_id(v) == 'server':
+ o, c = server_charts(v['server'])
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def get_data(self):
+ if not self.is_alive():
+ return None
+
+ stats = self.get_stats()
+
+ if not stats:
+ return None
+
+ data = dict()
+
+ # cluster
+ data.update(Cluster(stats[0]).data())
+
+ # servers
+ for v in stats[1:]:
+ if get_id(v) != 'server':
+ continue
+
+ s = Server(v)
+
+ if s.error():
+ data['cluster_servers_missing'] += 1
+ else:
+ data['cluster_servers_connected'] += 1
+ data.update(s.data())
+
+ return data
+
+ def get_stats(self):
+ try:
+ return list(rdb.db('rethinkdb').table('stats').run(self.conn).items)
+ except rdb.errors.ReqlError:
+ self.alive = False
+ return None
+
+ def connect(self):
+ try:
+ self.conn = rdb.connect(
+ host=self.host,
+ port=self.port,
+ user=self.user,
+ password=self.password,
+ timeout=self.timeout,
+ )
+ self.alive = True
+ return True
+ except rdb.errors.ReqlError as error:
+ self.error('Connection to {0}:{1} failed: {2}'.format(self.host, self.port, error))
+ return False
+
+ def reconnect(self):
+ # The connection is already closed after rdb.errors.ReqlError,
+ # so we do not need to call conn.close()
+        return self.connect()
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+
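+# Rows in the 'stats' system table carry a composite primary key such as
+# ['cluster'] or ['server', '<server uuid>'], so the first element of 'id'
+# identifies the row type.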
+def get_id(v):
+ return v['id'][0]
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
new file mode 100644
index 0000000..d671acb
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
@@ -0,0 +1,76 @@
+# netdata python.d.plugin configuration for rethinkdb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, rethinkdb also supports the following:
+#
+# host: IP or HOSTNAME # default is 'localhost'
+# port: PORT # default is 28015
+# user: USERNAME # default is 'admin'
+# password: PASSWORD # not set by default
+# timeout: TIMEOUT # default is 2
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ name: 'local'
+ host: 'localhost'
diff --git a/collectors/python.d.plugin/retroshare/Makefile.inc b/collectors/python.d.plugin/retroshare/Makefile.inc
new file mode 100644
index 0000000..891193e
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += retroshare/retroshare.chart.py
+dist_pythonconfig_DATA += retroshare/retroshare.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += retroshare/README.md retroshare/Makefile.inc
+
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
new file mode 100644
index 0000000..a8a5888
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -0,0 +1,3 @@
+# retroshare
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fretroshare%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
new file mode 100644
index 0000000..feb871f
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+# Description: RetroShare netdata python.d module
+# Authors: sehraf
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'dht',
+]
+
+CHARTS = {
+ 'bandwidth': {
+ 'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
+ 'lines': [
+ ['bandwidth_up_kb', 'Upload'],
+ ['bandwidth_down_kb', 'Download']
+ ]
+ },
+ 'peers': {
+ 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
+ 'lines': [
+ ['peers_all', 'All friends'],
+ ['peers_connected', 'Connected friends']
+ ]
+ },
+ 'dht': {
+ 'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
+ 'lines': [
+ ['dht_size_all', 'DHT nodes estimated'],
+ ['dht_size_rs', 'RS nodes estimated']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.baseurl = self.configuration.get('url', 'http://localhost:9090')
+
+ def _get_stats(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()
+ parsed = json.loads(raw)
+ if str(parsed['returncode']) != 'ok':
+ return None
+ except (TypeError, ValueError):
+ return None
+
+ return parsed['data'][0]
+
+ def _get_data(self):
+ """
+ Get data from API
+ :return: dict
+ """
+ self.url = self.baseurl + '/api/v2/stats'
+ data = self._get_stats()
+ if data is None:
+ return None
+
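+        # upload is negated so that it renders below the axis, mirroring
+        # download on the area chart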
+ data['bandwidth_up_kb'] = data['bandwidth_up_kb'] * -1
+ if data['dht_active'] is False:
+ data['dht_size_all'] = None
+ data['dht_size_rs'] = None
+
+ return data
diff --git a/collectors/python.d.plugin/retroshare/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf
new file mode 100644
index 0000000..3d0af53
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/retroshare.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for RetroShare
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, RetroShare also supports the following:
+#
+# - url: 'url' # the URL to the WebUI
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ url: 'http://localhost:9090'
diff --git a/collectors/python.d.plugin/samba/Makefile.inc b/collectors/python.d.plugin/samba/Makefile.inc
new file mode 100644
index 0000000..230a8ba
--- /dev/null
+++ b/collectors/python.d.plugin/samba/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += samba/samba.chart.py
+dist_pythonconfig_DATA += samba/samba.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += samba/README.md samba/Makefile.inc
+
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
new file mode 100644
index 0000000..97f2e3d
--- /dev/null
+++ b/collectors/python.d.plugin/samba/README.md
@@ -0,0 +1,69 @@
+# samba
+
+Performance metrics of Samba file sharing.
+
+**Requirements:**
+* `smbstatus` program
+* `sudo` program
+* `smbd` must be compiled with profiling enabled
+* `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
+* `netdata` user needs to be able to sudo the `smbstatus` program without a password
+
+It produces the following charts:
+
+1. **Syscall R/Ws** in kilobytes/s
+ * sendfile
+  * recvfile
+
+2. **Smb2 R/Ws** in kilobytes/s
+ * readout
+ * writein
+ * readin
+ * writeout
+
+3. **Smb2 Create/Close** in operations/s
+ * create
+ * close
+
+4. **Smb2 Info** in operations/s
+ * getinfo
+ * setinfo
+
+5. **Smb2 Find** in operations/s
+ * find
+
+6. **Smb2 Notify** in operations/s
+ * notify
+
+7. **Smb2 Lesser Ops** as counters
+ * tcon
+ * negprot
+ * tdis
+ * cancel
+ * logoff
+ * flush
+ * lock
+ * keepalive
+ * break
+ * sessetup
+
+### prerequisite
+This module uses `smbstatus`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured such that the `netdata` user can
+execute `smbstatus` as root without a password.
+
+Add to `sudoers`:
+
+ netdata ALL=(root) NOPASSWD: /path/to/smbstatus
+
+### configuration
+
+**samba** is disabled by default. It should be explicitly enabled in `python.d.conf`.
+
+```yaml
+samba: yes
+```
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsamba%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
new file mode 100644
index 0000000..ac89c29
--- /dev/null
+++ b/collectors/python.d.plugin/samba/samba.chart.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# Description: samba netdata python.d module
+# Author: Christopher Cox <chris_cox@endlessnow.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# The netdata user needs to be able to sudo the smbstatus program
+# without a password:
+# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+#
+# This makes calls to smbstatus -P
+#
+# This just looks at a couple of values out of syscall, and some from smb2.
+#
+# The Lesser Ops chart is merely a display of current counter values. They
+# didn't seem to change much to me. However, if you notice something changing
+# a lot there, bring one or more out into its own chart and make it incremental
+# (like find and notify... good examples).
+
+import re
+
+from bases.collection import find_binary
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+
+disabled_by_default = True
+
+update_every = 5
+
+ORDER = [
+ 'syscall_rw',
+ 'smb2_rw',
+ 'smb2_create_close',
+ 'smb2_info',
+ 'smb2_find',
+ 'smb2_notify',
+ 'smb2_sm_count'
+]
+
+CHARTS = {
+ 'syscall_rw': {
+ 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'],
+ 'lines': [
+ ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
+ ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_rw': {
+ 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'],
+ 'lines': [
+ ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
+ ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
+ ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
+ ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_create_close': {
+ 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line'],
+ 'lines': [
+ ['smb2_create_count', 'create', 'incremental', 1, 1],
+ ['smb2_close_count', 'close', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_info': {
+ 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line'],
+ 'lines': [
+ ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
+ ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_find': {
+ 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line'],
+ 'lines': [
+ ['smb2_find_count', 'find', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_notify': {
+ 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line'],
+ 'lines': [
+ ['smb2_notify_count', 'notify', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_sm_count': {
+ 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'],
+ 'lines': [
+ ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
+ ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
+ ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
+ ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
+ ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
+ ['smb2_flush_count', 'flush', 'absolute', 1, 1],
+ ['smb2_lock_count', 'lock', 'absolute', 1, 1],
+ ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
+ ['smb2_break_count', 'break', 'absolute', 1, 1],
+ ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
+
+ def check(self):
+ sudo_binary, smbstatus_binary = find_binary('sudo'), find_binary('smbstatus')
+
+ if not (sudo_binary and smbstatus_binary):
+ self.error("Can\'t locate 'sudo' or 'smbstatus' binary")
+ return False
+
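+        # 'sudo -v' only validates that sudo may run for this user; with the
+        # NOPASSWD rule from the README it prints nothing, so any stderr
+        # output is treated as a sudo misconfiguration.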
+ self.command = [sudo_binary, '-v']
+ err = self._get_raw_data(stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+
+ self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
+
+ return ExecutableService.check(self)
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ parsed = self.rgx_smb2.findall(' '.join(raw_data))
+
+ return dict(parsed) or None
diff --git a/collectors/python.d.plugin/samba/samba.conf b/collectors/python.d.plugin/samba/samba.conf
new file mode 100644
index 0000000..db15d4e
--- /dev/null
+++ b/collectors/python.d.plugin/samba/samba.conf
@@ -0,0 +1,60 @@
+# netdata python.d.plugin configuration for samba
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds \ No newline at end of file
diff --git a/collectors/python.d.plugin/sensors/Makefile.inc b/collectors/python.d.plugin/sensors/Makefile.inc
new file mode 100644
index 0000000..5fb26e1
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += sensors/sensors.chart.py
+dist_pythonconfig_DATA += sensors/sensors.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
+
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
new file mode 100644
index 0000000..e3f956f
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -0,0 +1,19 @@
+# sensors
+
+System sensors information.
+
+Charts are created dynamically.
+
+### configuration
+
+For detailed configuration information please read [`sensors.conf`](sensors.conf) file.
+
+### possible issues
+
+There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
+We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
+Please join this discussion for help.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsensors%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
new file mode 100644
index 0000000..e622eb8
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+# Description: sensors netdata python.d plugin
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from third_party import lm_sensors as sensors
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+ORDER = [
+ 'temperature',
+ 'fan',
+ 'voltage',
+ 'current',
+ 'power',
+ 'energy',
+ 'humidity',
+]
+
+# This is a prototype of chart definition which is used to dynamically create self.definitions
+CHARTS = {
+ 'temperature': {
+ 'options': [None, ' temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'voltage': {
+ 'options': [None, ' voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'current': {
+ 'options': [None, ' current', 'Ampere', 'current', 'sensors.current', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'power': {
+ 'options': [None, ' power', 'Watt', 'power', 'sensors.power', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000000]
+ ]
+ },
+ 'fan': {
+ 'options': [None, ' fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'energy': {
+ 'options': [None, ' energy', 'Joule', 'energy', 'sensors.energy', 'areastack'],
+ 'lines': [
+ [None, None, 'incremental', 1, 1000000]
+ ]
+ },
+ 'humidity': {
+ 'options': [None, ' humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ }
+}
+
+LIMITS = {
+ 'temperature': [-127, 1000],
+ 'voltage': [-127, 127],
+ 'current': [-127, 127],
+ 'fan': [0, 65535]
+}
+
+TYPE_MAP = {
+ 0: 'voltage',
+ 1: 'fan',
+ 2: 'temperature',
+ 3: 'power',
+ 4: 'energy',
+ 5: 'current',
+ 6: 'humidity',
+ 7: 'max_main',
+ 16: 'vid',
+ 17: 'intrusion',
+ 18: 'max_other',
+ 24: 'beep_enable'
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.chips = list()
+
+ def get_data(self):
+ data = dict()
+ try:
+ for chip in sensors.ChipIterator():
+ prefix = sensors.chip_snprintf_name(chip)
+ for feature in sensors.FeatureIterator(chip):
+ sfi = sensors.SubFeatureIterator(chip, feature)
+ val = None
+ for sf in sfi:
+ try:
+ val = sensors.get_value(chip, sf.number)
+ break
+ except sensors.SensorsError:
+ continue
+ if val is None:
+ continue
+ type_name = TYPE_MAP[feature.type]
+ if type_name in LIMITS:
+ limit = LIMITS[type_name]
+ if val < limit[0] or val > limit[1]:
+ continue
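+                    # values are stored as integers scaled by 1000; the chart
+                    # divisors defined in CHARTS restore the original units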
+ data[prefix + '_' + str(feature.name.decode())] = int(val * 1000)
+ except sensors.SensorsError as error:
+ self.error(error)
+ return None
+
+ return data or None
+
+ def create_definitions(self):
+ for sensor in ORDER:
+ for chip in sensors.ChipIterator():
+ chip_name = sensors.chip_snprintf_name(chip)
+ if self.chips and not any([chip_name.startswith(ex) for ex in self.chips]):
+ continue
+ for feature in sensors.FeatureIterator(chip):
+ sfi = sensors.SubFeatureIterator(chip, feature)
+ vals = list()
+ for sf in sfi:
+ try:
+ vals.append(sensors.get_value(chip, sf.number))
+ except sensors.SensorsError as error:
+ self.error('{0}: {1}'.format(sf.name, error))
+ continue
+ if not vals or (vals[0] == 0 and feature.type != 1):
+ continue
+ if TYPE_MAP[feature.type] == sensor:
+ # create chart
+ name = chip_name + '_' + TYPE_MAP[feature.type]
+ if name not in self.order:
+ self.order.append(name)
+ chart_def = list(CHARTS[sensor]['options'])
+ chart_def[1] = chip_name + chart_def[1]
+ self.definitions[name] = {'options': chart_def}
+ self.definitions[name]['lines'] = []
+ line = list(CHARTS[sensor]['lines'][0])
+ line[0] = chip_name + '_' + str(feature.name.decode())
+ line[1] = sensors.get_label(chip, feature)
+ self.definitions[name]['lines'].append(line)
+
+ def check(self):
+ try:
+ sensors.init()
+ except sensors.SensorsError as error:
+ self.error(error)
+ return False
+
+ self.create_definitions()
+
+ return True
diff --git a/collectors/python.d.plugin/sensors/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf
new file mode 100644
index 0000000..d3369ba
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/sensors.conf
@@ -0,0 +1,61 @@
+# netdata python.d.plugin configuration for sensors
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# Limit the sensor types to collect.
+# Comment out the ones you want to disable.
+# Also, re-arranging this list controls the order of the charts at the
+# netdata dashboard.
+
+types:
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+
+# ----------------------------------------------------------------------
+# Limit the sensor chips to collect.
+# Uncomment the first line (chips:) and add chip names below it.
+# Chip names are matched by prefix (anything that starts with the
+# given name will be matched).
+# You can find the chip names using the sensors shell command.
+
+#chips:
+#  - i8k
+#  - coretemp
+#
+#----------------------------------------------------------------------
+
diff --git a/collectors/python.d.plugin/smartd_log/Makefile.inc b/collectors/python.d.plugin/smartd_log/Makefile.inc
new file mode 100644
index 0000000..dc1d0f3
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += smartd_log/smartd_log.chart.py
+dist_pythonconfig_DATA += smartd_log/smartd_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += smartd_log/README.md smartd_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
new file mode 100644
index 0000000..3b0816f
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -0,0 +1,103 @@
+# smartd_log
+
+This module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
+
+**Requirements:**
+* `smartmontools`
+
+It produces the following charts for SCSI devices:
+
+1. **Read Error Corrected**
+
+2. **Read Error Uncorrected**
+
+3. **Write Error Corrected**
+
+4. **Write Error Uncorrected**
+
+5. **Verify Error Corrected**
+
+6. **Verify Error Uncorrected**
+
+7. **Temperature**
+
+
+For ATA devices:
+
+1. **Read Error Rate**
+
+2. **Seek Error Rate**
+
+3. **Soft Read Error Rate**
+
+4. **Write Error Rate**
+
+5. **SATA Interface Downshift**
+
+6. **UDMA CRC Error Count**
+
+7. **Throughput Performance**
+
+8. **Seek Time Performance**
+
+9. **Start/Stop Count**
+
+10. **Power-On Hours Count**
+
+11. **Power Cycle Count**
+
+12. **Unexpected Power Loss**
+
+13. **Spin-Up Time**
+
+14. **Spin-up Retries**
+
+15. **Calibration Retries**
+
+16. **Temperature**
+
+17. **Reallocated Sectors Count**
+
+18. **Reserved Block Count**
+
+19. **Program Fail Count**
+
+20. **Erase Fail Count**
+
+21. **Wear Leveller Worst Case Erase Count**
+
+22. **Unused Reserved NAND Blocks**
+
+23. **Reallocation Event Count**
+
+24. **Current Pending Sector Count**
+
+25. **Offline Uncorrectable Sector Count**
+
+26. **Percent Lifetime Used**
+
+### prerequisite
+`smartd` must be running with the `-A` option to write smartd attribute information to files.
+
+For this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:
+
+
+```
+# dump smartd attrs info every 600 seconds
+smartd_opts="-A /var/log/smartd/ -i 600"
+```
+
+
+`smartd` appends to its log files on every run. It is strongly recommended to use `logrotate` on the smartd files.
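+
+For reference, the module reads only the last line of each CSV file and
+extracts `id;normalized;raw` attribute triplets from it with a regular
+expression (see `smartd_log.chart.py`). A minimal sketch of that parsing
+step; the sample line is hypothetical, real lines are written by `smartd -A`:
+
+```python
+import re
+
+# same pattern the module compiles as RE_ATA:
+# attribute id, normalized value, raw value
+RE_ATA = re.compile(r'(\d+);(\d+);(\d+)')
+
+# hypothetical ATA log line, for illustration only
+line = '2018-08-01 12:00:00;\t1;100;0;\t9;98;1234;\t194;36;36;'
+
+for attr_id, normalized, raw in RE_ATA.findall(line):
+    print(attr_id, normalized, raw)
+```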
+
+### configuration
+
+```yaml
+local:
+ log_path : '/var/log/smartd/'
+```
+
+If no configuration is given, the module will attempt to read log files from the `/var/log/smartd/` directory.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsmartd_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
new file mode 100644
index 0000000..871025a
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -0,0 +1,728 @@
+# -*- coding: utf-8 -*-
+# Description: smartd log netdata python.d module
+# Author: l2isbad, vorph1
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+
+from copy import deepcopy
+from time import time
+
+from bases.collection import read_last_line
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+INCREMENTAL = 'incremental'
+ABSOLUTE = 'absolute'
+
+ATA = 'ata'
+SCSI = 'scsi'
+CSV = '.csv'
+
+DEF_RESCAN_INTERVAL = 60
+DEF_AGE = 30
+DEF_PATH = '/var/log/smartd'
+
+ATTR1 = '1'
+ATTR2 = '2'
+ATTR3 = '3'
+ATTR4 = '4'
+ATTR5 = '5'
+ATTR7 = '7'
+ATTR8 = '8'
+ATTR9 = '9'
+ATTR10 = '10'
+ATTR11 = '11'
+ATTR12 = '12'
+ATTR13 = '13'
+ATTR170 = '170'
+ATTR171 = '171'
+ATTR172 = '172'
+ATTR173 = '173'
+ATTR174 = '174'
+ATTR180 = '180'
+ATTR183 = '183'
+ATTR190 = '190'
+ATTR194 = '194'
+ATTR196 = '196'
+ATTR197 = '197'
+ATTR198 = '198'
+ATTR199 = '199'
+ATTR202 = '202'
+ATTR206 = '206'
+ATTR_READ_ERR_COR = 'read-total-err-corrected'
+ATTR_READ_ERR_UNC = 'read-total-unc-errors'
+ATTR_WRITE_ERR_COR = 'write-total-err-corrected'
+ATTR_WRITE_ERR_UNC = 'write-total-unc-errors'
+ATTR_VERIFY_ERR_COR = 'verify-total-err-corrected'
+ATTR_VERIFY_ERR_UNC = 'verify-total-unc-errors'
+ATTR_TEMPERATURE = 'temperature'
+
+
+RE_ATA = re.compile(
+    r'(\d+);'   # attribute
+    r'(\d+);'   # normalized value
+    r'(\d+)',   # raw value
+    re.X
+)
+
+RE_SCSI = re.compile(
+    r'([a-z-]+);'  # attribute
+    r'([0-9.]+)',  # raw value
+    re.X
+)
+
+ORDER = [
+ # errors
+ 'read_error_rate',
+ 'seek_error_rate',
+ 'soft_read_error_rate',
+ 'write_error_rate',
+ 'read_total_err_corrected',
+ 'read_total_unc_errors',
+ 'write_total_err_corrected',
+ 'write_total_unc_errors',
+ 'verify_total_err_corrected',
+ 'verify_total_unc_errors',
+ # external failure
+ 'sata_interface_downshift',
+ 'udma_crc_error_count',
+ # performance
+ 'throughput_performance',
+ 'seek_time_performance',
+ # power
+ 'start_stop_count',
+ 'power_on_hours_count',
+ 'power_cycle_count',
+ 'unexpected_power_loss',
+ # spin
+ 'spin_up_time',
+ 'spin_up_retries',
+ 'calibration_retries',
+ # temperature
+ 'airflow_temperature_celsius',
+ 'temperature_celsius',
+ # wear
+ 'reallocated_sectors_count',
+ 'reserved_block_count',
+ 'program_fail_count',
+ 'erase_fail_count',
+ 'wear_leveller_worst_case_erase_count',
+ 'unused_reserved_nand_blocks',
+ 'reallocation_event_count',
+ 'current_pending_sector_count',
+ 'offline_uncorrectable_sector_count',
+ 'percent_lifetime_used',
+]
+
+CHARTS = {
+ 'read_error_rate': {
+ 'options': [None, 'Read Error Rate', 'value', 'errors', 'smartd_log.read_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR1],
+ 'algo': ABSOLUTE,
+ },
+ 'seek_error_rate': {
+ 'options': [None, 'Seek Error Rate', 'value', 'errors', 'smartd_log.seek_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR7],
+ 'algo': ABSOLUTE,
+ },
+ 'soft_read_error_rate': {
+ 'options': [None, 'Soft Read Error Rate', 'errors', 'errors', 'smartd_log.soft_read_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR13],
+ 'algo': INCREMENTAL,
+ },
+ 'write_error_rate': {
+ 'options': [None, 'Write Error Rate', 'value', 'errors', 'smartd_log.write_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR206],
+ 'algo': ABSOLUTE,
+ },
+ 'read_total_err_corrected': {
+ 'options': [None, 'Read Error Corrected', 'errors', 'errors', 'smartd_log.read_total_err_corrected', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_READ_ERR_COR],
+ 'algo': INCREMENTAL,
+ },
+ 'read_total_unc_errors': {
+ 'options': [None, 'Read Error Uncorrected', 'errors', 'errors', 'smartd_log.read_total_unc_errors', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_READ_ERR_UNC],
+ 'algo': INCREMENTAL,
+ },
+ 'write_total_err_corrected': {
+        'options': [None, 'Write Error Corrected', 'errors', 'errors', 'smartd_log.write_total_err_corrected', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_WRITE_ERR_COR],
+ 'algo': INCREMENTAL,
+ },
+ 'write_total_unc_errors': {
+ 'options': [None, 'Write Error Uncorrected', 'errors', 'errors', 'smartd_log.write_total_unc_errors', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_WRITE_ERR_UNC],
+ 'algo': INCREMENTAL,
+ },
+ 'verify_total_err_corrected': {
+ 'options': [None, 'Verify Error Corrected', 'errors', 'errors', 'smartd_log.verify_total_err_corrected',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_VERIFY_ERR_COR],
+ 'algo': INCREMENTAL,
+ },
+ 'verify_total_unc_errors': {
+ 'options': [None, 'Verify Error Uncorrected', 'errors', 'errors', 'smartd_log.verify_total_unc_errors', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_VERIFY_ERR_UNC],
+ 'algo': INCREMENTAL,
+ },
+ 'sata_interface_downshift': {
+ 'options': [None, 'SATA Interface Downshift', 'events', 'external failure',
+ 'smartd_log.sata_interface_downshift', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR183],
+ 'algo': INCREMENTAL,
+ },
+ 'udma_crc_error_count': {
+ 'options': [None, 'UDMA CRC Error Count', 'errors', 'external failure', 'smartd_log.udma_crc_error_count',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR199],
+ 'algo': INCREMENTAL,
+ },
+ 'throughput_performance': {
+ 'options': [None, 'Throughput Performance', 'value', 'performance', 'smartd_log.throughput_performance',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR2],
+ 'algo': ABSOLUTE,
+ },
+ 'seek_time_performance': {
+ 'options': [None, 'Seek Time Performance', 'value', 'performance', 'smartd_log.seek_time_performance', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR8],
+ 'algo': ABSOLUTE,
+ },
+ 'start_stop_count': {
+ 'options': [None, 'Start/Stop Count', 'events', 'power', 'smartd_log.start_stop_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR4],
+ 'algo': ABSOLUTE,
+ },
+ 'power_on_hours_count': {
+ 'options': [None, 'Power-On Hours Count', 'hours', 'power', 'smartd_log.power_on_hours_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR9],
+ 'algo': ABSOLUTE,
+ },
+ 'power_cycle_count': {
+ 'options': [None, 'Power Cycle Count', 'events', 'power', 'smartd_log.power_cycle_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR12],
+ 'algo': ABSOLUTE,
+ },
+ 'unexpected_power_loss': {
+ 'options': [None, 'Unexpected Power Loss', 'events', 'power', 'smartd_log.unexpected_power_loss', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR174],
+ 'algo': ABSOLUTE,
+ },
+ 'spin_up_time': {
+ 'options': [None, 'Spin-Up Time', 'ms', 'spin', 'smartd_log.spin_up_time', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR3],
+ 'algo': ABSOLUTE,
+ },
+ 'spin_up_retries': {
+ 'options': [None, 'Spin-up Retries', 'retries', 'spin', 'smartd_log.spin_up_retries', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR10],
+ 'algo': INCREMENTAL,
+ },
+ 'calibration_retries': {
+ 'options': [None, 'Calibration Retries', 'retries', 'spin', 'smartd_log.calibration_retries', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR11],
+ 'algo': INCREMENTAL,
+ },
+ 'airflow_temperature_celsius': {
+ 'options': [None, 'Airflow Temperature Celsius', 'celsius', 'temperature',
+ 'smartd_log.airflow_temperature_celsius', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR190],
+ 'algo': ABSOLUTE,
+ },
+ 'temperature_celsius': {
+ 'options': [None, 'Temperature', 'celsius', 'temperature', 'smartd_log.temperature_celsius', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR194, ATTR_TEMPERATURE],
+ 'algo': ABSOLUTE,
+ },
+ 'reallocated_sectors_count': {
+ 'options': [None, 'Reallocated Sectors Count', 'sectors', 'wear', 'smartd_log.reallocated_sectors_count',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR5],
+ 'algo': INCREMENTAL,
+ },
+ 'reserved_block_count': {
+ 'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR170],
+ 'algo': ABSOLUTE,
+ },
+ 'program_fail_count': {
+ 'options': [None, 'Program Fail Count', 'errors', 'wear', 'smartd_log.program_fail_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR171],
+ 'algo': INCREMENTAL,
+ },
+ 'erase_fail_count': {
+ 'options': [None, 'Erase Fail Count', 'failures', 'wear', 'smartd_log.erase_fail_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR172],
+ 'algo': INCREMENTAL,
+ },
+ 'wear_leveller_worst_case_erase_count': {
+ 'options': [None, 'Wear Leveller Worst Case Erase Count', 'erases', 'wear',
+ 'smartd_log.wear_leveller_worst_case_erase_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR173],
+ 'algo': ABSOLUTE,
+ },
+ 'unused_reserved_nand_blocks': {
+ 'options': [None, 'Unused Reserved NAND Blocks', 'blocks', 'wear', 'smartd_log.unused_reserved_nand_blocks',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR180],
+ 'algo': ABSOLUTE,
+ },
+ 'reallocation_event_count': {
+ 'options': [None, 'Reallocation Event Count', 'events', 'wear', 'smartd_log.reallocation_event_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR196],
+ 'algo': INCREMENTAL,
+ },
+ 'current_pending_sector_count': {
+ 'options': [None, 'Current Pending Sector Count', 'sectors', 'wear', 'smartd_log.current_pending_sector_count',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR197],
+ 'algo': ABSOLUTE,
+ },
+ 'offline_uncorrectable_sector_count': {
+ 'options': [None, 'Offline Uncorrectable Sector Count', 'sectors', 'wear',
+ 'smartd_log.offline_uncorrectable_sector_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR198],
+ 'algo': ABSOLUTE,
+ },
+ 'percent_lifetime_used': {
+ 'options': [None, 'Percent Lifetime Used', 'percentage', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR202],
+ 'algo': ABSOLUTE,
+ }
+}
+
+# NOTE: 'parse_temp' decodes ATA 194 raw value. Not heavily tested. Written by @Ferroin
+# C code:
+# https://github.com/smartmontools/smartmontools/blob/master/smartmontools/atacmds.cpp#L2051
+#
+# Calling 'parse_temp' on the raw value will return a 4-tuple, containing
+# * temperature
+# * minimum
+# * maximum
+# * over-temperature count
+# substituting None for values it can't decode.
+#
+# Example:
+# >>> parse_temp(42952491042)
+# >>> (34, 10, 43, None)
+#
+#
+# def check_temp_word(i):
+# if i <= 0x7F:
+# return 0x11
+# elif i <= 0xFF:
+# return 0x01
+# elif 0xFF80 <= i:
+# return 0x10
+# return 0x00
+#
+#
+# def check_temp_range(t, b0, b1):
+# if b0 > b1:
+# t0, t1 = b1, b0
+# else:
+# t0, t1 = b0, b1
+#
+# if all([
+# -60 <= t0,
+# t0 <= t,
+# t <= t1,
+# t1 <= 120,
+# not (t0 == -1 and t1 <= 0)
+# ]):
+# return t0, t1
+# return None, None
+#
+#
+# def parse_temp(raw):
+# byte = list()
+# word = list()
+# for i in range(0, 6):
+# byte.append(0xFF & (raw >> (i * 8)))
+# for i in range(0, 3):
+# word.append(0xFFFF & (raw >> (i * 16)))
+#
+# ctwd = check_temp_word(word[0])
+#
+# if not word[2]:
+# if ctwd and not word[1]:
+# # byte[0] is temp, no other data
+# return byte[0], None, None, None
+#
+# if ctwd and all(check_temp_range(byte[0], byte[2], byte[3])):
+# # byte[0] is temp, byte[2] is max or min, byte[3] is min or max
+# trange = check_temp_range(byte[0], byte[2], byte[3])
+# return byte[0], trange[0], trange[1], None
+#
+# if ctwd and all(check_temp_range(byte[0], byte[1], byte[2])):
+# # byte[0] is temp, byte[1] is max or min, byte[2] is min or max
+# trange = check_temp_range(byte[0], byte[1], byte[2])
+# return byte[0], trange[0], trange[1], None
+#
+# return None, None, None, None
+#
+# if ctwd:
+# if all(
+# [
+# ctwd & check_temp_word(word[1]) & check_temp_word(word[2]) != 0x00,
+# all(check_temp_range(byte[0], byte[2], byte[4])),
+# ]
+# ):
+# # byte[0] is temp, byte[2] is max or min, byte[4] is min or max
+# trange = check_temp_range(byte[0], byte[2], byte[4])
+# return byte[0], trange[0], trange[1], None
+# else:
+# trange = check_temp_range(byte[0], byte[2], byte[3])
+# if word[2] < 0x7FFF and all(trange) and trange[1] >= 40:
+# # byte[0] is temp, byte[2] is max or min, byte[3] is min or max, word[2] is overtemp count
+# return byte[0], trange[0], trange[1], word[2]
+# # no data
+# return None, None, None, None
+
+
+CHARTED_ATTRS = dict((attr, k) for k, v in CHARTS.items() for attr in v['attrs'])
+
+
+class BaseAtaSmartAttribute:
+ def __init__(self, name, normalized_value, raw_value):
+ self.name = name
+ self.normalized_value = normalized_value
+ self.raw_value = raw_value
+
+ def value(self):
+ raise NotImplementedError
+
+
+class AtaRaw(BaseAtaSmartAttribute):
+ def value(self):
+ return self.raw_value
+
+
+class AtaNormalized(BaseAtaSmartAttribute):
+ def value(self):
+ return self.normalized_value
+
+
+class Ata9(BaseAtaSmartAttribute):
+ def value(self):
+ value = int(self.raw_value)
+ if value > 1e6:
+ return value & 0xFFFF
+ return value
+
+
+class Ata190(BaseAtaSmartAttribute):
+ def value(self):
+ return 100 - int(self.normalized_value)
+
+
+class Ata194(BaseAtaSmartAttribute):
+ def value(self):
+ return min(int(self.normalized_value), int(self.raw_value))
+
+
+class BaseSCSISmartAttribute:
+ def __init__(self, name, raw_value):
+ self.name = name
+ self.raw_value = raw_value
+
+ def value(self):
+ raise NotImplementedError
+
+
+class SCSIRaw(BaseSCSISmartAttribute):
+ def value(self):
+ return self.raw_value
+
+
+def ata_attribute_factory(value):
+ name = value[0]
+
+ if name == ATTR9:
+ return Ata9(*value)
+ elif name == ATTR190:
+ return Ata190(*value)
+ elif name == ATTR194:
+ return Ata194(*value)
+ elif name in [
+ ATTR1,
+ ATTR7,
+ ATTR202,
+ ATTR206,
+ ]:
+ return AtaNormalized(*value)
+
+ return AtaRaw(*value)
+
+
+def scsi_attribute_factory(value):
+ return SCSIRaw(*value)
+
+
+def attribute_factory(value):
+ name = value[0]
+ if name.isdigit():
+ return ata_attribute_factory(value)
+ return scsi_attribute_factory(value)
+
+
+def handle_error(*errors):
+ def on_method(method):
+ def on_call(*args):
+ try:
+ return method(*args)
+ except errors:
+ return None
+ return on_call
+ return on_method
+
+
+class DiskLogFile:
+ def __init__(self, full_path):
+ self.path = full_path
+ self.size = os.path.getsize(full_path)
+
+ @handle_error(OSError)
+ def is_changed(self):
+ return self.size != os.path.getsize(self.path)
+
+ @handle_error(OSError)
+ def is_active(self, current_time, limit):
+ return (current_time - os.path.getmtime(self.path)) / 60 < limit
+
+ @handle_error(OSError)
+ def read(self):
+ self.size = os.path.getsize(self.path)
+ return read_last_line(self.path)
+
+
+class BaseDisk:
+ def __init__(self, name, log_file):
+ self.name = re.sub(r'_+', '_', name)
+ self.log_file = log_file
+ self.attrs = list()
+ self.alive = True
+ self.charted = False
+
+ def __eq__(self, other):
+ if isinstance(other, BaseDisk):
+ return self.name == other.name
+ return self.name == other
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def parser(self, data):
+ raise NotImplementedError
+
+ @handle_error(TypeError)
+ def populate_attrs(self):
+ self.attrs = list()
+ line = self.log_file.read()
+ for value in self.parser(line):
+ self.attrs.append(attribute_factory(value))
+
+ return len(self.attrs)
+
+ def data(self):
+ data = dict()
+ for attr in self.attrs:
+ data['{0}_{1}'.format(self.name, attr.name)] = attr.value()
+ return data
+
+
+class ATADisk(BaseDisk):
+ def parser(self, data):
+ return RE_ATA.findall(data)
+
+
+class SCSIDisk(BaseDisk):
+ def parser(self, data):
+ return RE_SCSI.findall(data)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.log_path = configuration.get('log_path', DEF_PATH)
+ self.age = configuration.get('age', DEF_AGE)
+ self.exclude = configuration.get('exclude_disks', str()).split()
+ self.disks = list()
+ self.runs = 0
+
+ def check(self):
+ return self.scan() > 0
+
+ def get_data(self):
+ self.runs += 1
+
+ if self.runs % DEF_RESCAN_INTERVAL == 0:
+ self.cleanup()
+ self.scan()
+
+ data = dict()
+
+ for disk in self.disks:
+ if not disk.alive:
+ continue
+
+ if not disk.charted:
+ self.add_disk_to_charts(disk)
+
+ changed = disk.log_file.is_changed()
+
+ if changed is None:
+ disk.alive = False
+ continue
+
+ if changed and disk.populate_attrs() is None:
+ disk.alive = False
+ continue
+
+ data.update(disk.data())
+
+ return data
+
+ def cleanup(self):
+ current_time = time()
+ for disk in self.disks[:]:
+ if any(
+ [
+ not disk.alive,
+ not disk.log_file.is_active(current_time, self.age),
+ ]
+ ):
+                self.disks.remove(disk.name)  # BaseDisk.__eq__ also matches name strings, so removal by name works
+ self.remove_disk_from_charts(disk)
+
+ def scan(self):
+ self.debug('scanning {0}'.format(self.log_path))
+ current_time = time()
+
+ for full_name in os.listdir(self.log_path):
+ disk = self.create_disk_from_file(full_name, current_time)
+ if not disk:
+ continue
+ self.disks.append(disk)
+
+ return len(self.disks)
+
+ def create_disk_from_file(self, full_name, current_time):
+ if not full_name.endswith(CSV):
+ self.debug('skipping {0}: not a csv file'.format(full_name))
+ return None
+
+ name = os.path.basename(full_name).split('.')[-3]
+ path = os.path.join(self.log_path, full_name)
+
+ if name in self.disks:
+ return None
+
+ if [p for p in self.exclude if p in name]:
+ return None
+
+ if not os.access(path, os.R_OK):
+ self.debug('skipping {0}: not readable'.format(full_name))
+ return None
+
+ if os.path.getsize(path) == 0:
+ self.debug('skipping {0}: zero size'.format(full_name))
+ return None
+
+ if (current_time - os.path.getmtime(path)) / 60 > self.age:
+            self.debug('skipping {0}: has not been updated in the last {1} minutes'.format(full_name, self.age))
+ return None
+
+ if ATA in full_name:
+ disk = ATADisk(name, DiskLogFile(path))
+ elif SCSI in full_name:
+ disk = SCSIDisk(name, DiskLogFile(path))
+ else:
+ self.debug('skipping {0}: unknown type'.format(full_name))
+ return None
+
+ disk.populate_attrs()
+ if not disk.attrs:
+ self.error('skipping {0}: parsing failed'.format(full_name))
+ return None
+
+ self.debug('added {0}'.format(full_name))
+ return disk
+
+ def add_disk_to_charts(self, disk):
+ if len(self.charts) == 0 or disk.charted:
+ return
+ disk.charted = True
+
+ for attr in disk.attrs:
+ chart_id = CHARTED_ATTRS.get(attr.name)
+
+ if not chart_id or chart_id not in self.charts:
+ continue
+
+ chart = self.charts[chart_id]
+ dim = [
+ '{0}_{1}'.format(disk.name, attr.name),
+ disk.name,
+ CHARTS[chart_id]['algo'],
+ ]
+
+ if dim[0] in self.charts[chart_id].dimensions:
+ chart.hide_dimension(dim[0], reverse=True)
+ else:
+ chart.add_dimension(dim)
+
+ def remove_disk_from_charts(self, disk):
+ if len(self.charts) == 0 or not disk.charted:
+ return
+
+ for attr in disk.attrs:
+ chart_id = CHARTED_ATTRS.get(attr.name)
+
+ if not chart_id or chart_id not in self.charts:
+ continue
+
+ # TODO: can't delete dimension
+ self.charts[chart_id].hide_dimension('{0}_{1}'.format(disk.name, attr.name))
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
new file mode 100644
index 0000000..4f138d1
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf
@@ -0,0 +1,67 @@
+# netdata python.d.plugin configuration for smartd log
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, smartd_log also supports the following:
+#
+# log_path: '/path/to/smartd_logs' # path to smartd log files. Default is /var/log/smartd
+# exclude_disks: 'PATTERN1 PATTERN2' # space separated patterns. If the pattern is in the drive name, the module will not collect data for it.
+#
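+# A configuration example using both options (the values shown are
+# illustrative):
+#
+# local:
+#   log_path: '/var/log/smartd/'
+#   exclude_disks: 'nvme1 nvme2'
+#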
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/spigotmc/Makefile.inc b/collectors/python.d.plugin/spigotmc/Makefile.inc
new file mode 100644
index 0000000..f9fa8b6
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += spigotmc/spigotmc.chart.py
+dist_pythonconfig_DATA += spigotmc/spigotmc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += spigotmc/README.md spigotmc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
new file mode 100644
index 0000000..c389305
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -0,0 +1,24 @@
+# spigotmc
+
+This module performs basic monitoring of Spigot Minecraft servers.
+
+It provides two charts, one tracking server-side ticks-per-second in
+1, 5 and 15 minute averages, and one tracking the number of currently
+active users.
+
+This is not compatible with Spigot plugins which change the format of
+the data returned by the `tps` or `list` console commands.
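+
+For reference, a minimal sketch of how the module extracts the three TPS
+averages from the `tps` console output (the sample string mirrors the
+format documented in `spigotmc.chart.py`):
+
+```python
+# -*- coding: utf-8 -*-
+raw = u'§6TPS from last 1m, 5m, 15m: §a19.99, §a19.98, §a20.0\n'
+
+# the values we care about are the three numbers after the colon
+tmp = raw.split(':')[1].split(',')
+tps1 = float(tmp[0].lstrip(u' §a*'))
+tps5 = float(tmp[1].lstrip(u' §a*'))
+tps15 = float(tmp[2].lstrip(u' §a*').rstrip())
+print(tps1, tps5, tps15)  # 19.99 19.98 20.0
+```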
+
+### configuration
+
+```yaml
+host: localhost
+port: 25575
+password: pass
+```
+
+By default, a connection to port 25575 on the local system is attempted with an empty password.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspigotmc%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
new file mode 100644
index 0000000..09674f5
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+# Description: spigotmc netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+import platform
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+from third_party import mcrcon
+
+# Update only every 5 seconds because collection takes in excess of
+# 100ms sometimes, and most people won't care about second-by-second data.
+update_every = 5
+
+PRECISION = 100
+
+ORDER = [
+ 'tps',
+ 'users',
+]
+
+CHARTS = {
+ 'tps': {
+ 'options': [None, 'Spigot Ticks Per Second', 'ticks', 'spigotmc', 'spigotmc.tps', 'line'],
+ 'lines': [
+ ['tps1', '1 Minute Average', 'absolute', 1, PRECISION],
+ ['tps5', '5 Minute Average', 'absolute', 1, PRECISION],
+ ['tps15', '15 Minute Average', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'users': {
+ 'options': [None, 'Minecraft Users', 'users', 'spigotmc', 'spigotmc.users', 'area'],
+ 'lines': [
+ ['users', 'Users', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 25575)
+ self.password = self.configuration.get('password', '')
+ self.console = mcrcon.MCRcon()
+ self.alive = True
+
+ def check(self):
+ if platform.system() != 'Linux':
+ self.error('Only supported on Linux.')
+ return False
+ try:
+ self.connect()
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+ return True
+
+ def connect(self):
+ self.console.connect(self.host, self.port, self.password)
+
+ def reconnect(self):
+ try:
+ try:
+ self.console.disconnect()
+ except mcrcon.MCRconException:
+ pass
+ self.console.connect(self.host, self.port, self.password)
+ self.alive = True
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+ return True
+
+ def is_alive(self):
+ if (not self.alive) or \
+ self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1:
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+ data = {}
+ try:
+ raw = self.console.command('tps')
+ # The above command returns a string that looks like this:
+ # '§6TPS from last 1m, 5m, 15m: §a19.99, §a19.99, §a19.99\n'
+ # The values we care about are the three numbers after the :
+ tmp = raw.split(':')[1].split(',')
+ data['tps1'] = float(tmp[0].lstrip(u' §a*')) * PRECISION
+ data['tps5'] = float(tmp[1].lstrip(u' §a*')) * PRECISION
+ data['tps15'] = float(tmp[2].lstrip(u' §a*').rstrip()) * PRECISION
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch TPS values.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+ except (TypeError, LookupError):
+ self.error('Unable to process TPS values.')
+ try:
+ raw = self.console.command('list')
+ # The above command returns a string that looks like this:
+ # 'There are 0/20 players online:'
+ # We care about the first number here.
+ data['users'] = int(raw.split()[2].split('/')[0])
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch user counts.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+ except (TypeError, LookupError):
+ self.error('Unable to process user counts.')
+ return data
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
new file mode 100644
index 0000000..ccb5e26
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for spigotmc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, spigotmc supports the following:
+#
+# host: localhost # The host to connect to. Defaults to the local system.
+#  port: 25575           # The port the remote console is listening on.
+#  password: ''          # The remote console password. Must be set correctly.
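+#
+# A configuration example (the password value is illustrative):
+#
+# local:
+#   host: localhost
+#   port: 25575
+#   password: 'changeme'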
diff --git a/collectors/python.d.plugin/springboot/Makefile.inc b/collectors/python.d.plugin/springboot/Makefile.inc
new file mode 100644
index 0000000..06775f9
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += springboot/springboot.chart.py
+dist_pythonconfig_DATA += springboot/springboot.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += springboot/README.md springboot/Makefile.inc
+
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
new file mode 100644
index 0000000..b5b776d
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -0,0 +1,124 @@
+# springboot
+
+This module monitors one or more Java Spring Boot applications, depending on configuration.
+Netdata can monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the **Spring Boot Actuator** included in the Spring Boot library.
+
+## Configuration
+
+The Spring Boot Actuator exposes these metrics over HTTP and is very easy to use:
+* add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies
+* set `endpoints.metrics.sensitive=false` in your `application.properties`
+
+You can create custom metrics by implementing and injecting a `PublicMetrics` bean into your application.
+Here is an example that adds custom metrics:
+```java
+package com.example;
+
+import org.springframework.boot.actuate.endpoint.PublicMetrics;
+import org.springframework.boot.actuate.metrics.Metric;
+import org.springframework.stereotype.Service;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.util.ArrayList;
+import java.util.Collection;
+
+@Service
+public class HeapPoolMetrics implements PublicMetrics {
+
+ private static final String PREFIX = "mempool.";
+ private static final String KEY_EDEN = PREFIX + "eden";
+ private static final String KEY_SURVIVOR = PREFIX + "survivor";
+ private static final String KEY_TENURED = PREFIX + "tenured";
+
+ @Override
+ public Collection<Metric<?>> metrics() {
+ Collection<Metric<?>> result = new ArrayList<>(4);
+ for (MemoryPoolMXBean mem : ManagementFactory.getMemoryPoolMXBeans()) {
+ String poolName = mem.getName();
+ String name = null;
+ if (poolName.indexOf("Eden Space") != -1) {
+ name = KEY_EDEN;
+ } else if (poolName.indexOf("Survivor Space") != -1) {
+ name = KEY_SURVIVOR;
+ } else if (poolName.indexOf("Tenured Gen") != -1 || poolName.indexOf("Old Gen") != -1) {
+ name = KEY_TENURED;
+ }
+
+ if (name != null) {
+ result.add(newMemoryMetric(name, mem.getUsage().getMax()));
+ result.add(newMemoryMetric(name + ".init", mem.getUsage().getInit()));
+ result.add(newMemoryMetric(name + ".committed", mem.getUsage().getCommitted()));
+ result.add(newMemoryMetric(name + ".used", mem.getUsage().getUsed()));
+ }
+ }
+ return result;
+ }
+
+ private Metric<Long> newMemoryMetric(String name, long bytes) {
+ return new Metric<>(name, bytes / 1024);
+ }
+}
+```
+
+Please refer to [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.
+
+## Charts
+
+1. **Response Codes** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+ * others
+
+2. **Threads**
+ * daemon
+ * total
+
+3. **GC Time** in milliseconds and **GC Operations** in operations/s
+ * Copy
+ * MarkSweep
+ * ...
+
+4. **Heap Memory Usage** in KiB
+ * used
+ * committed
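+
+For reference, the response-code dimensions are derived by bucketing the
+Actuator's `counter.status.*` metrics by the first digit of the HTTP status
+code. A minimal sketch of that bucketing, mirroring the logic in
+`springboot.chart.py` (the sample metrics are illustrative):
+
+```python
+data = {
+    'counter.status.200.root': 127,
+    'counter.status.404.star-star': 4,
+    'counter.status.999.unknown': 1,
+}
+
+result = {'resp_other': 0}
+for i in '12345':
+    result['resp_' + i + 'xx'] = 0
+
+for key, value in data.items():
+    if 'counter.status.' in key:
+        status_type = key[15:16] + 'xx'  # first digit of the status code
+        if status_type[0] not in '12345':
+            status_type = 'other'
+        result['resp_' + status_type] += value
+
+print(result)  # resp_2xx: 127, resp_4xx: 4, resp_other: 1, ...
+```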
+
+## Usage
+
+The springboot module is enabled by default. It checks `http://localhost:8080/metrics` and `http://127.0.0.1:8080/metrics` to detect Spring Boot applications. You can change this by editing `/etc/netdata/python.d/springboot.conf` (to edit it on your system, run `/etc/netdata/edit-config python.d/springboot.conf`).
+
+This module defines some common charts, and you can add custom charts by changing the configuration.
+
+The configuration format is like:
+```yaml
+<id>:
+ name: '<name>'
+ url: '<metrics endpoint>' # ex. http://localhost:8080/metrics
+ user: '<username>' # optional
+ pass: '<password>' # optional
+ defaults:
+ [<chart-id>]: true|false
+ extras:
+ - id: '<chart-id>'
+ options:
+ title: '***'
+ units: '***'
+ family: '***'
+ context: 'springboot.***'
+ charttype: 'stacked' | 'area' | 'line'
+ lines:
+ - { dimension: 'myapp_ok', name: 'ok', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ok" metrics
+ - { dimension: 'myapp_ng', name: 'ng', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ng" metrics
+```
+
+By default, it creates the `response_code`, `threads`, `gc_time`, `gc_ope` and `heap` charts.
+You can disable the default charts by setting `defaults.<chart-id>: false`.
+
+In extra charts, dimension names must use `_` in place of `.` (for example, the metric `myapp.ok` becomes the dimension `myapp_ok`).
+
+Please check [springboot.conf](springboot.conf) for more examples.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fspringboot%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
new file mode 100644
index 0000000..eec870e
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Description: springboot netdata python.d module
+# Author: Wing924
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+from bases.FrameworkServices.UrlService import UrlService
+
+
+DEFAULT_ORDER = [
+ 'response_code',
+ 'threads',
+ 'gc_time',
+ 'gc_ope',
+ 'heap',
+]
+
+DEFAULT_CHARTS = {
+ 'response_code': {
+ 'options': [None, "Response Codes", "requests/s", "response", "springboot.response_code", "stacked"],
+ 'lines': [
+ ["resp_other", 'Other', 'incremental'],
+ ["resp_1xx", '1xx', 'incremental'],
+ ["resp_2xx", '2xx', 'incremental'],
+ ["resp_3xx", '3xx', 'incremental'],
+ ["resp_4xx", '4xx', 'incremental'],
+ ["resp_5xx", '5xx', 'incremental'],
+ ]
+ },
+ 'threads': {
+ 'options': [None, "Threads", "current threads", "threads", "springboot.threads", "area"],
+ 'lines': [
+ ["threads_daemon", 'daemon', 'absolute'],
+ ["threads", 'total', 'absolute'],
+ ]
+ },
+ 'gc_time': {
+ 'options': [None, "GC Time", "milliseconds", "garbage collection", "springboot.gc_time", "stacked"],
+ 'lines': [
+ ["gc_copy_time", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_time", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
+ ]
+ },
+ 'gc_ope': {
+ 'options': [None, "GC Operations", "operations/s", "garbage collection", "springboot.gc_ope", "stacked"],
+ 'lines': [
+ ["gc_copy_count", 'Copy', 'incremental'],
+ ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
+ ["gc_parnew_count", 'ParNew', 'incremental'],
+ ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
+ ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
+ ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
+ ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
+ ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
+ ]
+ },
+ 'heap': {
+ 'options': [None, "Heap Memory Usage", "KiB", "heap memory", "springboot.heap", "area"],
+ 'lines': [
+ ["heap_committed", 'committed', "absolute"],
+ ["heap_used", 'used', "absolute"],
+ ]
+ }
+}
+
+
+class ExtraChartError(ValueError):
+ pass
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', "http://localhost:8080/metrics")
+ self._setup_charts()
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ data = json.loads(raw_data)
+ except ValueError:
+            self.debug('%s is not a valid JSON page' % self.url)
+ return None
+
+ result = {
+ 'resp_1xx': 0,
+ 'resp_2xx': 0,
+ 'resp_3xx': 0,
+ 'resp_4xx': 0,
+ 'resp_5xx': 0,
+ 'resp_other': 0,
+ }
+
+        for key, value in data.items():  # items() works on both Python 2 and Python 3
+ if 'counter.status.' in key:
+ status_type = key[15:16] + 'xx'
+ if status_type[0] not in '12345':
+ status_type = 'other'
+ result['resp_' + status_type] += value
+ else:
+ result[key.replace('.', '_')] = value
+
+ return result or None
+
+ def _setup_charts(self):
+ self.order = []
+ self.definitions = {}
+ defaults = self.configuration.get('defaults', {})
+
+ for chart in DEFAULT_ORDER:
+ if defaults.get(chart, True):
+ self.order.append(chart)
+ self.definitions[chart] = DEFAULT_CHARTS[chart]
+
+ for extra in self.configuration.get('extras', []):
+ self._add_extra_chart(extra)
+ self.order.append(extra['id'])
+
+ def _add_extra_chart(self, chart):
+ chart_id = chart.get('id', None) or self.die('id is not defined in extra chart')
+ options = chart.get('options', None) or self.die('option is not defined in extra chart: %s' % chart_id)
+ lines = chart.get('lines', None) or self.die('lines is not defined in extra chart: %s' % chart_id)
+
+ title = options.get('title', None) or self.die('title is missing: %s' % chart_id)
+ units = options.get('units', None) or self.die('units is missing: %s' % chart_id)
+ family = options.get('family', title)
+ context = options.get('context', 'springboot.' + title)
+ charttype = options.get('charttype', 'line')
+
+ result = {
+ 'options': [None, title, units, family, context, charttype],
+ 'lines': [],
+ }
+
+ for line in lines:
+ dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
+ name = line.get('name', dimension)
+ algorithm = line.get('algorithm', 'absolute')
+ multiplier = line.get('multiplier', 1)
+ divisor = line.get('divisor', 1)
+ result['lines'].append([dimension, name, algorithm, multiplier, divisor])
+
+ self.definitions[chart_id] = result
+
+ @staticmethod
+ def die(error_message):
+ raise ExtraChartError(error_message)
diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
new file mode 100644
index 0000000..13a3989
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/springboot.conf
@@ -0,0 +1,118 @@
+# netdata python.d.plugin configuration for springboot
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, this plugin also supports the following:
+#
+# url: 'http://127.0.0.1/metrics' # the URL of the spring boot actuator metrics
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# defaults:
+# [chart_id]: true | false # enables/disables default charts, defaults true.
+#  extras: []           # defines extra charts to monitor, please see the example below
+# - id: [chart_id]
+# options: {}
+# lines: []
+#
+# If all defaults are disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Configuration example
+# ---------------------
+# example:
+# name: 'example'
+# url: 'http://localhost:8080/metrics'
+# defaults:
+# response_code: true
+# threads: true
+# gc_time: true
+# gc_ope: true
+# heap: false
+# extras:
+# - id: 'heap'
+# options: { title: 'Heap Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap', charttype: 'stacked' }
+# lines:
+# - { dimension: 'mem_free', name: 'free'}
+# - { dimension: 'mempool_eden_used', name: 'eden', algorithm: 'absolute', multiplier: 1, divisor: 1}
+# - { dimension: 'mempool_survivor_used', name: 'survivor', algorithm: 'absolute', multiplier: 1, divisor: 1}
+# - { dimension: 'mempool_tenured_used', name: 'tenured', algorithm: 'absolute', multiplier: 1, divisor: 1}
+# - id: 'heap_eden'
+# options: { title: 'Eden Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_eden', charttype: 'area' }
+# lines:
+# - { dimension: 'mempool_eden_used', name: 'used'}
+#        - { dimension: 'mempool_eden_committed', name: 'committed'}
+# - id: 'heap_survivor'
+# options: { title: 'Survivor Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_survivor', charttype: 'area' }
+# lines:
+# - { dimension: 'mempool_survivor_used', name: 'used'}
+#        - { dimension: 'mempool_survivor_committed', name: 'committed'}
+# - id: 'heap_tenured'
+# options: { title: 'Tenured Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_tenured', charttype: 'area' }
+# lines:
+# - { dimension: 'mempool_tenured_used', name: 'used'}
+#        - { dimension: 'mempool_tenured_committed', name: 'committed'}
+
+
+local:
+ name: 'local'
+ url: 'http://localhost:8080/metrics'
+
+local_ip:
+ name: 'local'
+ url: 'http://127.0.0.1:8080/metrics'
diff --git a/collectors/python.d.plugin/squid/Makefile.inc b/collectors/python.d.plugin/squid/Makefile.inc
new file mode 100644
index 0000000..76ecff8
--- /dev/null
+++ b/collectors/python.d.plugin/squid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += squid/squid.chart.py
+dist_pythonconfig_DATA += squid/squid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += squid/README.md squid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
new file mode 100644
index 0000000..b278f41
--- /dev/null
+++ b/collectors/python.d.plugin/squid/README.md
@@ -0,0 +1,40 @@
+# squid
+
+This module will monitor one or more squid instances depending on configuration.
+
+It produces the following charts:
+
+1. **Client Bandwidth** in kilobits/s
+ * in
+ * out
+ * hits
+
+2. **Client Requests** in requests/s
+ * requests
+ * hits
+ * errors
+
+3. **Server Bandwidth** in kilobits/s
+ * in
+ * out
+
+4. **Server Requests** in requests/s
+ * requests
+ * errors
+
+### configuration
+
+```yaml
+priority : 50000
+
+local:
+ request : 'cache_object://localhost:3128/counters'
+ host : 'localhost'
+ port : 3128
+```
+
+Without any configuration, the module will try to autodetect where squid presents its `counters` data.
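+
+The `counters` page is a plain-text list of `name = value` lines. A minimal
+sketch of how the module turns the client and server counters into metric
+keys (mirroring `squid.chart.py`; the sample lines are illustrative):
+
+```python
+raw = ('sample_time = 1234.5\n'
+       'client_http.requests = 42\n'
+       'server.all.kbytes_in = 1024\n')
+
+data = {}
+for row in raw.split('\n'):
+    # only client_* and server.all.* counters are charted
+    if row.startswith(('client', 'server.all')):
+        key, value = row.split('=')
+        data[key.replace('.', '_').strip(' ')] = int(value)
+
+print(data)  # {'client_http_requests': 42, 'server_all_kbytes_in': 1024}
+```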
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fsquid%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
new file mode 100644
index 0000000..c00556b
--- /dev/null
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+# Description: squid netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+
+ORDER = [
+ 'clients_net',
+ 'clients_requests',
+ 'servers_net',
+ 'servers_requests',
+]
+
+CHARTS = {
+ 'clients_net': {
+ 'options': [None, 'Squid Client Bandwidth', 'kilobits/s', 'clients', 'squid.clients_net', 'area'],
+ 'lines': [
+ ['client_http_kbytes_in', 'in', 'incremental', 8, 1],
+ ['client_http_kbytes_out', 'out', 'incremental', -8, 1],
+ ['client_http_hit_kbytes_out', 'hits', 'incremental', -8, 1]
+ ]
+ },
+ 'clients_requests': {
+ 'options': [None, 'Squid Client Requests', 'requests/s', 'clients', 'squid.clients_requests', 'line'],
+ 'lines': [
+ ['client_http_requests', 'requests', 'incremental'],
+ ['client_http_hits', 'hits', 'incremental'],
+ ['client_http_errors', 'errors', 'incremental', -1, 1]
+ ]
+ },
+ 'servers_net': {
+ 'options': [None, 'Squid Server Bandwidth', 'kilobits/s', 'servers', 'squid.servers_net', 'area'],
+ 'lines': [
+ ['server_all_kbytes_in', 'in', 'incremental', 8, 1],
+ ['server_all_kbytes_out', 'out', 'incremental', -8, 1]
+ ]
+ },
+ 'servers_requests': {
+ 'options': [None, 'Squid Server Requests', 'requests/s', 'servers', 'squid.servers_requests', 'line'],
+ 'lines': [
+ ['server_all_requests', 'requests', 'incremental'],
+ ['server_all_errors', 'errors', 'incremental', -1, 1]
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self._keep_alive = True
+ self.request = ''
+ self.host = 'localhost'
+ self.port = 3128
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Get data via http request
+ :return: dict
+ """
+ response = self._get_raw_data()
+
+ data = dict()
+ try:
+ raw = ''
+ for tmp in response.split('\r\n'):
+ if tmp.startswith('sample_time'):
+ raw = tmp
+ break
+
+ if raw.startswith('<'):
+ self.error('invalid data received')
+ return None
+
+ for row in raw.split('\n'):
+ if row.startswith(('client', 'server.all')):
+ tmp = row.split('=')
+ data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
+
+ except (ValueError, AttributeError, TypeError):
+ self.error('invalid data received')
+ return None
+
+ if not data:
+ self.error('no data received')
+ return None
+ return data
+
+ def _check_raw_data(self, data):
+ header = data[:1024].lower()
+
+ if 'connection: keep-alive' in header:
+ self._keep_alive = True
+ else:
+ self._keep_alive = False
+
+ if data[-7:] == '\r\n0\r\n\r\n' and 'transfer-encoding: chunked' in header: # HTTP/1.1 response
+ self.debug('received full response from squid')
+ return True
+
+ self.debug('waiting more data from squid')
+ return False
+
+ def check(self):
+ """
+ Parse essential configuration, autodetect squid configuration (if needed), and check if data is available
+ :return: boolean
+ """
+ self._parse_config()
+ # format request
+ req = self.request.decode()
+ if not req.startswith('GET'):
+ req = 'GET ' + req
+ if not req.endswith(' HTTP/1.1\r\n\r\n'):
+ req += ' HTTP/1.1\r\n\r\n'
+ self.request = req.encode()
+        return self._get_data() is not None
diff --git a/collectors/python.d.plugin/squid/squid.conf b/collectors/python.d.plugin/squid/squid.conf
new file mode 100644
index 0000000..b90a52c
--- /dev/null
+++ b/collectors/python.d.plugin/squid/squid.conf
@@ -0,0 +1,167 @@
+# netdata python.d.plugin configuration for squid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, squid also supports the following:
+#
+# host : 'IP or HOSTNAME' # the host to connect to
+# port : PORT # the port to connect to
+# request: 'URL' # the URL to request from squid
+#
+
+# ----------------------------------------------------------------------
+# SQUID CONFIGURATION
+#
+# See:
+# http://wiki.squid-cache.org/Features/CacheManager
+#
+# In short, add to your squid configuration these:
+#
+# http_access allow localhost manager
+# http_access deny manager
+#
+# To remotely monitor a squid:
+#
+# acl managerAdmin src 192.0.2.1
+# http_access allow localhost manager
+# http_access allow managerAdmin manager
+# http_access deny manager
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp3128old:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : 'cache_object://localhost:3128/counters'
+
+tcp8080old:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : 'cache_object://localhost:3128/counters'
+
+tcp3128new:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080new:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv4
+
+tcp3128oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp8080oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp3128newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv6
+
+tcp3128oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp8080oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp3128newipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
diff --git a/collectors/python.d.plugin/tomcat/Makefile.inc b/collectors/python.d.plugin/tomcat/Makefile.inc
new file mode 100644
index 0000000..940a783
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += tomcat/tomcat.chart.py
+dist_pythonconfig_DATA += tomcat/tomcat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc
+
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
new file mode 100644
index 0000000..21e3896
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -0,0 +1,35 @@
+# tomcat
+
+Presents Tomcat request, bandwidth, thread, and JVM memory utilization metrics.
+
+Charts:
+
+1. **Requests** per second
+   * accesses
+   * errors
+
+2. **Bandwidth** in KiB/s
+   * sent
+   * received
+
+3. **Processing time** in seconds
+
+4. **Threads**
+   * current
+   * busy
+
+5. **JVM Memory Pool Usage** in MiB
+   * free
+   * eden
+   * survivor
+   * tenured
+   * code cache
+   * compressed
+   * metaspace
+
+Per-pool usage charts are also drawn for the eden, survivor, and tenured pools.
+
+### configuration
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+ user : 'tomcat_username'
+ pass : 'secret_tomcat_password'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/manager/status?XML=true` without any credentials, so it will most likely fail.
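+
+To grant access, you typically need a Tomcat user with a manager role. A
+minimal sketch of `conf/tomcat-users.xml` (username, password, and role are
+illustrative; match them to your Tomcat version's manager roles):
+
+```xml
+<tomcat-users>
+  <role rolename="manager-gui"/>
+  <user username="tomcat_username" password="secret_tomcat_password" roles="manager-gui"/>
+</tomcat-users>
+```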
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftomcat%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
new file mode 100644
index 0000000..01578c5
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -0,0 +1,168 @@
+# -*- coding: utf-8 -*-
+# Description: tomcat netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Wei He (Wing924)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import xml.etree.ElementTree as ET
+
+from bases.FrameworkServices.UrlService import UrlService
+
+MiB = 1 << 20
+
+ORDER = [
+ 'accesses',
+ 'bandwidth',
+ 'processing_time',
+ 'threads',
+ 'jvm',
+ 'jvm_eden',
+ 'jvm_survivor',
+ 'jvm_tenured',
+]
+
+CHARTS = {
+ 'accesses': {
+ 'options': [None, 'Requests', 'requests/s', 'statistics', 'tomcat.accesses', 'area'],
+ 'lines': [
+ ['requestCount', 'accesses', 'incremental'],
+ ['errorCount', 'errors', 'incremental'],
+ ]
+ },
+ 'bandwidth': {
+ 'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'],
+ 'lines': [
+ ['bytesSent', 'sent', 'incremental', 1, 1024],
+ ['bytesReceived', 'received', 'incremental', 1, 1024],
+ ]
+ },
+ 'processing_time': {
+ 'options': [None, 'processing time', 'seconds', 'statistics', 'tomcat.processing_time', 'area'],
+ 'lines': [
+ ['processingTime', 'processing time', 'incremental', 1, 1000]
+ ]
+ },
+ 'threads': {
+ 'options': [None, 'Threads', 'current threads', 'statistics', 'tomcat.threads', 'area'],
+ 'lines': [
+ ['currentThreadCount', 'current', 'absolute'],
+ ['currentThreadsBusy', 'busy', 'absolute']
+ ]
+ },
+ 'jvm': {
+ 'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'],
+ 'lines': [
+ ['free', 'free', 'absolute', 1, MiB],
+ ['eden_used', 'eden', 'absolute', 1, MiB],
+ ['survivor_used', 'survivor', 'absolute', 1, MiB],
+ ['tenured_used', 'tenured', 'absolute', 1, MiB],
+ ['code_cache_used', 'code cache', 'absolute', 1, MiB],
+ ['compressed_used', 'compressed', 'absolute', 1, MiB],
+ ['metaspace_used', 'metaspace', 'absolute', 1, MiB],
+ ]
+ },
+ 'jvm_eden': {
+ 'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'],
+ 'lines': [
+ ['eden_used', 'used', 'absolute', 1, MiB],
+ ['eden_committed', 'committed', 'absolute', 1, MiB],
+ ['eden_max', 'max', 'absolute', 1, MiB]
+ ]
+ },
+ 'jvm_survivor': {
+ 'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'],
+ 'lines': [
+ ['survivor_used', 'used', 'absolute', 1, MiB],
+ ['survivor_committed', 'committed', 'absolute', 1, MiB],
+ ['survivor_max', 'max', 'absolute', 1, MiB],
+ ]
+ },
+ 'jvm_tenured': {
+ 'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'],
+ 'lines': [
+ ['tenured_used', 'used', 'absolute', 1, MiB],
+ ['tenured_committed', 'committed', 'absolute', 1, MiB],
+ ['tenured_max', 'max', 'absolute', 1, MiB]
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
+ self.connector_name = self.configuration.get('connector_name', None)
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = None
+ raw_data = self._get_raw_data()
+ if raw_data:
+ try:
+ xml = ET.fromstring(raw_data)
+ except ET.ParseError:
+                self.debug('%s is not a valid XML page. Please add "?XML=true" to tomcat status page.' % self.url)
+ return None
+ data = {}
+
+ jvm = xml.find('jvm')
+
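+            # pick the connector matching connector_name, or fall back to the first one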
+ connector = None
+ if self.connector_name:
+ for conn in xml.findall('connector'):
+ if self.connector_name in conn.get('name'):
+ connector = conn
+ break
+ else:
+ connector = xml.find('connector')
+
+ memory = jvm.find('memory')
+ data['free'] = memory.get('free')
+ data['total'] = memory.get('total')
+
+ for pool in jvm.findall('memorypool'):
+ name = pool.get('name')
+ if 'Eden Space' in name:
+ data['eden_used'] = pool.get('usageUsed')
+ data['eden_committed'] = pool.get('usageCommitted')
+ data['eden_max'] = pool.get('usageMax')
+ elif 'Survivor Space' in name:
+ data['survivor_used'] = pool.get('usageUsed')
+ data['survivor_committed'] = pool.get('usageCommitted')
+ data['survivor_max'] = pool.get('usageMax')
+ elif 'Tenured Gen' in name or 'Old Gen' in name:
+ data['tenured_used'] = pool.get('usageUsed')
+ data['tenured_committed'] = pool.get('usageCommitted')
+ data['tenured_max'] = pool.get('usageMax')
+ elif name == 'Code Cache':
+ data['code_cache_used'] = pool.get('usageUsed')
+ data['code_cache_committed'] = pool.get('usageCommitted')
+ data['code_cache_max'] = pool.get('usageMax')
+ elif name == 'Compressed':
+ data['compressed_used'] = pool.get('usageUsed')
+ data['compressed_committed'] = pool.get('usageCommitted')
+ data['compressed_max'] = pool.get('usageMax')
+ elif name == 'Metaspace':
+ data['metaspace_used'] = pool.get('usageUsed')
+ data['metaspace_committed'] = pool.get('usageCommitted')
+ data['metaspace_max'] = pool.get('usageMax')
+
+ if connector:
+ thread_info = connector.find('threadInfo')
+ data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy')
+ data['currentThreadCount'] = thread_info.get('currentThreadCount')
+
+ request_info = connector.find('requestInfo')
+ data['processingTime'] = request_info.get('processingTime')
+ data['requestCount'] = request_info.get('requestCount')
+ data['errorCount'] = request_info.get('errorCount')
+ data['bytesReceived'] = request_info.get('bytesReceived')
+ data['bytesSent'] = request_info.get('bytesSent')
+
+ return data or None
diff --git a/collectors/python.d.plugin/tomcat/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
new file mode 100644
index 0000000..009591b
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.conf
@@ -0,0 +1,89 @@
+# netdata python.d.plugin configuration for tomcat
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, tomcat also supports the following:
+#
+#     url: 'URL'     # the URL to fetch tomcat's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# if you have multiple connectors, the following are supported:
+#
+#     connector_name: 'ajp-bio-8009'   # default is null, which uses the first connector in the status XML
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8080/manager/status?XML=true'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]:8080/manager/status?XML=true'
diff --git a/collectors/python.d.plugin/tor/Makefile.inc b/collectors/python.d.plugin/tor/Makefile.inc
new file mode 100644
index 0000000..5a45f9b
--- /dev/null
+++ b/collectors/python.d.plugin/tor/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += tor/tor.chart.py
+dist_pythonconfig_DATA += tor/tor.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tor/README.md tor/Makefile.inc
+
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
new file mode 100644
index 0000000..2ce0f25
--- /dev/null
+++ b/collectors/python.d.plugin/tor/README.md
@@ -0,0 +1,48 @@
+# tor
+
+Module connects to the Tor control port to collect traffic statistics.
+
+**Requirements:**
+* `tor` program
+* `stem` python package
+
+It produces only one chart:
+
+1. **Traffic**
+ * read
+ * write
+
+### configuration
+
+Needs only `control_port`.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 1
+priority : 60000
+
+local_tcp:
+ name: 'local'
+ control_port: 9051
+
+local_socket:
+ name: 'local'
+ control_port: '/var/run/tor/control'
+```
+
+### prerequisite
+
+Add to `/etc/tor/torrc`:
+
+```
+ControlPort 9051
+```
+
+For more options please read the manual.
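+
+If the control port is password-protected (via `HashedControlPassword` in
+`torrc`, generated with `tor --hash-password`), pass the plain-text password
+to the module using the `password` option (values below are illustrative):
+
+```yaml
+local_tcp:
+  name: 'local'
+  control_port: 9051
+  password: 'some_tor_password'
+```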
+
+Without configuration, the module attempts to connect to `127.0.0.1:9051`.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py
new file mode 100644
index 0000000..dd61e6e
--- /dev/null
+++ b/collectors/python.d.plugin/tor/tor.chart.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+# Description: tor netdata python.d module
+# Author: Federico Ceratto <federico.ceratto@gmail.com>
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+try:
+ import stem
+ import stem.connection
+ import stem.control
+ STEM_AVAILABLE = True
+except ImportError:
+ STEM_AVAILABLE = False
+
+
+DEF_PORT = 'default'
+
+ORDER = [
+ 'traffic',
+]
+
+CHARTS = {
+ 'traffic': {
+ 'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'],
+ 'lines': [
+ ['read', 'read', 'incremental', 1, 1024],
+ ['write', 'write', 'incremental', 1, -1024],
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ """Provide netdata service for Tor"""
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.port = self.configuration.get('control_port', DEF_PORT)
+ self.password = self.configuration.get('password')
+ self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
+ self.conn = None
+ self.alive = False
+
+ def check(self):
+ if not STEM_AVAILABLE:
+ self.error('the stem library is missing')
+ return False
+
+ return self.connect()
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ data = dict()
+
+ try:
+ data['read'] = self.conn.get_info('traffic/read')
+ data['write'] = self.conn.get_info('traffic/written')
+ except stem.ControllerError as error:
+ self.debug(error)
+ self.alive = False
+
+ return data or None
+
+ def authenticate(self):
+ try:
+ self.conn.authenticate(password=self.password)
+ except stem.connection.AuthenticationFailure as error:
+ self.error('authentication error: {0}'.format(error))
+ return False
+ return True
+
+ def connect_via_port(self):
+ try:
+ self.conn = stem.control.Controller.from_port(port=self.port)
+ except (stem.SocketError, ValueError) as error:
+ self.error(error)
+
+ def connect_via_socket(self):
+ try:
+ self.conn = stem.control.Controller.from_socket_file(path=self.port)
+ except (stem.SocketError, ValueError) as error:
+ self.error(error)
+
+ def connect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+
+ if self.use_socket:
+ self.connect_via_socket()
+ else:
+ self.connect_via_port()
+
+ if self.conn and self.authenticate():
+ self.alive = True
+
+ return self.alive
+
+ def reconnect(self):
+ return self.connect()
diff --git a/collectors/python.d.plugin/tor/tor.conf b/collectors/python.d.plugin/tor/tor.conf
new file mode 100644
index 0000000..91b517a
--- /dev/null
+++ b/collectors/python.d.plugin/tor/tor.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for tor
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, tor plugin also supports the following:
+#
+# control_port: 'port' # tor control port
+# password: 'password' # tor control password
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+# local_tcp:
+# name: 'local'
+# control_port: 9051
+#
+# local_socket:
+# name: 'local'
+# control_port: '/var/run/tor/control'
diff --git a/collectors/python.d.plugin/traefik/Makefile.inc b/collectors/python.d.plugin/traefik/Makefile.inc
new file mode 100644
index 0000000..926d56d
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += traefik/traefik.chart.py
+dist_pythonconfig_DATA += traefik/traefik.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += traefik/README.md traefik/Makefile.inc
+
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
new file mode 100644
index 0000000..61e0fdb
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -0,0 +1,55 @@
+# traefik
+
+Module uses the `health` API to provide statistics.
+
+It produces:
+
+1. **Responses** by statuses
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Responses** by codes
+ * 2xx (successful)
+ * 5xx (internal server errors)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 1xx (informational)
+   * other (non-standard responses)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Requests**/s
+ * request statistics
+
+5. **Total response time**
+   * sum of all response times
+
+6. **Average response time**
+
+7. **Average response time per iteration**
+
+8. **Uptime**
+ * Traefik server uptime
+
+### configuration
+
+Needs only the `url` of the server's `health` endpoint.
+
+Here is an example for a local server:
+
+```yaml
+update_every : 1
+priority : 60000
+
+local:
+ url : 'http://localhost:8080/health'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/health`.
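+
+The `health` endpoint must be enabled in Traefik itself. A minimal sketch
+for a Traefik 1.x `traefik.toml` (assuming the default port 8080):
+
+```toml
+[web]
+address = ":8080"
+```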
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Ftraefik%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
new file mode 100644
index 0000000..570339d
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+# Description: traefik netdata python.d module
+# Author: Alexandre Menezes (@ale_menezes)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import defaultdict
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+
+ORDER = [
+ 'response_statuses',
+ 'response_codes',
+ 'detailed_response_codes',
+ 'requests',
+ 'total_response_time',
+ 'average_response_time',
+ 'average_response_time_per_iteration',
+ 'uptime'
+]
+
+CHARTS = {
+ 'response_statuses': {
+ 'options': [None, 'Response statuses', 'requests/s', 'responses', 'traefik.response_statuses', 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental'],
+ ['server_errors', 'error', 'incremental'],
+ ['redirects', 'redirect', 'incremental'],
+ ['bad_requests', 'bad', 'incremental'],
+ ['other_requests', 'other', 'incremental']
+ ]
+ },
+ 'response_codes': {
+ 'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['other', None, 'incremental']
+ ]
+ },
+ 'detailed_response_codes': {
+ 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes',
+ 'stacked'],
+ 'lines': []
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'],
+ 'lines': [
+ ['total_count', 'requests', 'incremental']
+ ]
+ },
+ 'total_response_time': {
+ 'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'],
+ 'lines': [
+ ['total_response_time_sec', 'response', 'absolute', 1, 10000]
+ ]
+ },
+ 'average_response_time': {
+ 'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'],
+ 'lines': [
+ ['average_response_time_sec', 'response', 'absolute', 1, 1000]
+ ]
+ },
+ 'average_response_time_per_iteration': {
+ 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings',
+ 'traefik.average_response_time_per_iteration', 'line'],
+ 'lines': [
+ ['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000]
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'],
+ 'lines': [
+ ['uptime_sec', 'uptime', 'absolute']
+ ]
+ }
+}
+
+HEALTH_STATS = [
+ 'uptime_sec',
+ 'average_response_time_sec',
+ 'total_response_time_sec',
+ 'total_count',
+ 'total_status_code_count'
+]
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', 'http://localhost:8080/health')
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.last_total_response_time = 0
+ self.last_total_count = 0
+ self.data = {
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'average_response_time_per_iteration_sec': 0,
+ }
+
+ def _get_data(self):
+ data = self._get_raw_data()
+
+ if not data:
+ return None
+
+ data = loads(data)
+
+ self.get_data_per_code_status(raw_data=data)
+
+ self.get_data_per_code_family(raw_data=data)
+
+ self.get_data_per_code(raw_data=data)
+
+ self.data.update(fetch_data_(raw_data=data, metrics=HEALTH_STATS))
+
+ self.data['average_response_time_sec'] *= 1000000
+ self.data['total_response_time_sec'] *= 10000
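+        # derive this iteration's average response time from the deltas of the cumulative totals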
+ if data['total_count'] != self.last_total_count:
+ self.data['average_response_time_per_iteration_sec'] = \
+ (data['total_response_time_sec'] - self.last_total_response_time) * \
+ 1000000 / (data['total_count'] - self.last_total_count)
+ else:
+ self.data['average_response_time_per_iteration_sec'] = 0
+ self.last_total_response_time = data['total_response_time_sec']
+ self.last_total_count = data['total_count']
+
+ return self.data or None
+
+ def get_data_per_code_status(self, raw_data):
+ data = defaultdict(int)
+ for code, value in raw_data['total_status_code_count'].items():
+ code_prefix = code[0]
+ if code_prefix == '1' or code_prefix == '2' or code == '304':
+ data['successful_requests'] += value
+ elif code_prefix == '3':
+ data['redirects'] += value
+ elif code_prefix == '4':
+ data['bad_requests'] += value
+ elif code_prefix == '5':
+ data['server_errors'] += value
+ else:
+ data['other_requests'] += value
+ self.data.update(data)
+
+ def get_data_per_code_family(self, raw_data):
+ data = defaultdict(int)
+ for code, value in raw_data['total_status_code_count'].items():
+ code_prefix = code[0]
+ if code_prefix == '1':
+ data['1xx'] += value
+ elif code_prefix == '2':
+ data['2xx'] += value
+ elif code_prefix == '3':
+ data['3xx'] += value
+ elif code_prefix == '4':
+ data['4xx'] += value
+ elif code_prefix == '5':
+ data['5xx'] += value
+ else:
+ data['other'] += value
+ self.data.update(data)
+
+ def get_data_per_code(self, raw_data):
+ for code, value in raw_data['total_status_code_count'].items():
+ if self.charts:
+ if code not in self.data:
+ self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
+ self.data[code] = value
+
+
+def fetch_data_(raw_data, metrics):
+ data = dict()
+
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+
+ return data
diff --git a/collectors/python.d.plugin/traefik/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf
new file mode 100644
index 0000000..e3f182d
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/traefik.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for traefik health data API
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, traefik plugin also supports the following:
+#
+# url: '<scheme>://<host>:<port>/<health_page_api>'
+# # http://localhost:8080/health
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ url: 'http://localhost:8080/health'
diff --git a/collectors/python.d.plugin/unbound/Makefile.inc b/collectors/python.d.plugin/unbound/Makefile.inc
new file mode 100644
index 0000000..59c306a
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += unbound/unbound.chart.py
+dist_pythonconfig_DATA += unbound/unbound.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += unbound/README.md unbound/Makefile.inc
+
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
new file mode 100644
index 0000000..e213683
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/README.md
@@ -0,0 +1,78 @@
+# unbound
+
+This module uses the Unbound remote control interface to fetch statistics.
+
+Provides the following charts:
+
+1. **Queries Processed**
+ * Ratelimited
+ * Cache Misses
+ * Cache Hits
+ * Expired
+ * Prefetched
+ * Recursive
+
+2. **Request List**
+ * Average Size
+ * Max Size
+ * Overwritten Requests
+ * Overruns
+ * Current Size
+ * User Requests
+
+3. **Recursion Timings**
+ * Average recursion processing time
+ * Median recursion processing time
+
+If extended stats are enabled, also provides:
+
+4. **Cache Sizes**
+ * Message Cache
+ * RRset Cache
+ * Infra Cache
+ * DNSSEC Key Cache
+ * DNSCrypt Shared Secret Cache
+ * DNSCrypt Nonce Cache
+
+### configuration
+
+Unbound must be manually configured to enable the remote-control protocol.
+Check the Unbound documentation for info on how to do this. Additionally,
+if you want to take advantage of the autodetection this plugin offers,
+you will need to make sure your `unbound.conf` file only uses spaces for
+indentation (the default config shipped by most distributions uses tabs
+instead of spaces).
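+
+For reference, a minimal sketch of the relevant `unbound.conf` pieces
+(spaces-only indentation; paths and addresses are common defaults, so
+verify them against your distribution):
+
+```
+server:
+    extended-statistics: yes
+
+remote-control:
+    control-enable: yes
+    control-interface: 127.0.0.1
+    control-port: 8953
+    control-key-file: /etc/unbound/unbound_control.key
+    control-cert-file: /etc/unbound/unbound_control.pem
+```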
+
+Once you have the Unbound control protocol enabled, you need to make sure
+that either the certificate and key are readable by Netdata (if you're
+using the regular control interface), or that the socket is accessible
+to Netdata (if you're using a UNIX socket for the control interface).
+
+By default, for the local system, everything can be auto-detected
+assuming Unbound is configured correctly and has been told to listen
+on the loopback interface or a UNIX socket. This is done by looking
+up info in the Unbound config file specified by the `ubconf` key.
+
+To enable extended stats for a given job, add `extended: yes` to the
+definition.
+
+You can also enable per-thread charts for a given job by adding
+`per_thread: yes` to the definition. Note that the number of threads
+is only checked on startup.
+
+A basic local configuration with extended statistics and per-thread
+charts looks like this:
+
+```yaml
+local:
+ ubconf: /etc/unbound/unbound.conf
+ extended: yes
+ per_thread: yes
+```
+
+While it's a bit more complicated to set up correctly, it is recommended
+that you use a UNIX socket as it provides far better performance.
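+
+A socket-based setup points Unbound's `control-interface` at a filesystem
+path instead of an address, and gives the job a matching `socket` option
+(the path below is illustrative):
+
+```
+remote-control:
+    control-enable: yes
+    control-interface: /run/unbound.sock
+```
+
+```yaml
+local:
+  socket: /run/unbound.sock
+```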
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Funbound%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
new file mode 100644
index 0000000..adb58d4
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.chart.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+# Description: unbound netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import sys
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.loaders import YamlOrderedLoader
+
+PRECISION = 1000
+
+ORDER = [
+ 'queries',
+ 'recursion',
+ 'reqlist',
+]
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'Queries Processed', 'queries', 'Unbound', 'unbound.queries', 'line'],
+ 'lines': [
+ ['ratelimit', 'ratelimited', 'absolute', 1, 1],
+ ['cachemiss', 'cache_miss', 'absolute', 1, 1],
+ ['cachehit', 'cache_hit', 'absolute', 1, 1],
+ ['expired', 'expired', 'absolute', 1, 1],
+ ['prefetch', 'prefetched', 'absolute', 1, 1],
+ ['recursive', 'recursive', 'absolute', 1, 1]
+ ]
+ },
+ 'recursion': {
+ 'options': [None, 'Recursion Timings', 'seconds', 'Unbound', 'unbound.recursion', 'line'],
+ 'lines': [
+ ['recursive_avg', 'average', 'absolute', 1, PRECISION],
+ ['recursive_med', 'median', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'reqlist': {
+ 'options': [None, 'Request List', 'items', 'Unbound', 'unbound.reqlist', 'line'],
+ 'lines': [
+ ['reqlist_avg', 'average_size', 'absolute', 1, 1],
+ ['reqlist_max', 'maximum_size', 'absolute', 1, 1],
+ ['reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+ ['reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+ ['reqlist_current', 'current_size', 'absolute', 1, 1],
+ ['reqlist_user', 'user_requests', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# These get added too if we are told to use extended stats.
+EXTENDED_ORDER = ['cache']
+
+EXTENDED_CHARTS = {
+ 'cache': {
+ 'options': [None, 'Cache Sizes', 'items', 'Unbound', 'unbound.cache', 'stacked'],
+ 'lines': [
+ ['cache_message', 'message_cache', 'absolute', 1, 1],
+ ['cache_rrset', 'rrset_cache', 'absolute', 1, 1],
+ ['cache_infra', 'infra_cache', 'absolute', 1, 1],
+ ['cache_key', 'dnssec_key_cache', 'absolute', 1, 1],
+ ['cache_dnscss', 'dnscrypt_Shared_Secret_cache', 'absolute', 1, 1],
+ ['cache_dnscn', 'dnscrypt_Nonce_cache', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# This is used as a template for the per-thread charts.
+PER_THREAD_CHARTS = {
+ '_queries': {
+ 'options': [None, '{longname} Queries Processed', 'queries', 'Queries Processed',
+ 'unbound.threads.queries', 'line'],
+ 'lines': [
+ ['{shortname}_ratelimit', 'ratelimited', 'absolute', 1, 1],
+ ['{shortname}_cachemiss', 'cache_miss', 'absolute', 1, 1],
+ ['{shortname}_cachehit', 'cache_hit', 'absolute', 1, 1],
+ ['{shortname}_expired', 'expired', 'absolute', 1, 1],
+ ['{shortname}_prefetch', 'prefetched', 'absolute', 1, 1],
+ ['{shortname}_recursive', 'recursive', 'absolute', 1, 1]
+ ]
+ },
+ '_recursion': {
+ 'options': [None, '{longname} Recursion Timings', 'seconds', 'Recursive Timings',
+ 'unbound.threads.recursion', 'line'],
+ 'lines': [
+ ['{shortname}_recursive_avg', 'average', 'absolute', 1, PRECISION],
+ ['{shortname}_recursive_med', 'median', 'absolute', 1, PRECISION]
+ ]
+ },
+ '_reqlist': {
+ 'options': [None, '{longname} Request List', 'items', 'Request List', 'unbound.threads.reqlist', 'line'],
+ 'lines': [
+ ['{shortname}_reqlist_avg', 'average_size', 'absolute', 1, 1],
+ ['{shortname}_reqlist_max', 'maximum_size', 'absolute', 1, 1],
+ ['{shortname}_reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+ ['{shortname}_reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+ ['{shortname}_reqlist_current', 'current_size', 'absolute', 1, 1],
+ ['{shortname}_reqlist_user', 'user_requests', 'absolute', 1, 1]
+ ]
+ }
+}
+
+
+# This maps the Unbound stat names to our names and precision requirements.
+STAT_MAP = {
+ 'total.num.queries_ip_ratelimited': ('ratelimit', 1),
+ 'total.num.cachehits': ('cachehit', 1),
+ 'total.num.cachemiss': ('cachemiss', 1),
+ 'total.num.zero_ttl': ('expired', 1),
+ 'total.num.prefetch': ('prefetch', 1),
+ 'total.num.recursivereplies': ('recursive', 1),
+ 'total.requestlist.avg': ('reqlist_avg', 1),
+ 'total.requestlist.max': ('reqlist_max', 1),
+ 'total.requestlist.overwritten': ('reqlist_overwritten', 1),
+ 'total.requestlist.exceeded': ('reqlist_exceeded', 1),
+ 'total.requestlist.current.all': ('reqlist_current', 1),
+ 'total.requestlist.current.user': ('reqlist_user', 1),
+ 'total.recursion.time.avg': ('recursive_avg', PRECISION),
+ 'total.recursion.time.median': ('recursive_med', PRECISION),
+ 'msg.cache.count': ('cache_message', 1),
+ 'rrset.cache.count': ('cache_rrset', 1),
+ 'infra.cache.count': ('cache_infra', 1),
+ 'key.cache.count': ('cache_key', 1),
+ 'dnscrypt_shared_secret.cache.count': ('cache_dnscss', 1),
+ 'dnscrypt_nonce.cache.count': ('cache_dnscn', 1)
+}
+
+# Same as above, but for per-thread stats.
+PER_THREAD_STAT_MAP = {
+ '{shortname}.num.queries_ip_ratelimited': ('{shortname}_ratelimit', 1),
+ '{shortname}.num.cachehits': ('{shortname}_cachehit', 1),
+ '{shortname}.num.cachemiss': ('{shortname}_cachemiss', 1),
+ '{shortname}.num.zero_ttl': ('{shortname}_expired', 1),
+ '{shortname}.num.prefetch': ('{shortname}_prefetch', 1),
+ '{shortname}.num.recursivereplies': ('{shortname}_recursive', 1),
+ '{shortname}.requestlist.avg': ('{shortname}_reqlist_avg', 1),
+ '{shortname}.requestlist.max': ('{shortname}_reqlist_max', 1),
+ '{shortname}.requestlist.overwritten': ('{shortname}_reqlist_overwritten', 1),
+ '{shortname}.requestlist.exceeded': ('{shortname}_reqlist_exceeded', 1),
+ '{shortname}.requestlist.current.all': ('{shortname}_reqlist_current', 1),
+ '{shortname}.requestlist.current.user': ('{shortname}_reqlist_user', 1),
+ '{shortname}.recursion.time.avg': ('{shortname}_recursive_avg', PRECISION),
+ '{shortname}.recursion.time.median': ('{shortname}_recursive_med', PRECISION)
+}
+
+
+# Used to actually generate per-thread charts.
+def _get_perthread_info(thread):
+ sname = 'thread{0}'.format(thread)
+ lname = 'Thread {0}'.format(thread)
+ charts = dict()
+ order = []
+ statmap = dict()
+
+ for item in PER_THREAD_CHARTS:
+ cname = '{0}{1}'.format(sname, item)
+ chart = deepcopy(PER_THREAD_CHARTS[item])
+ chart['options'][1] = chart['options'][1].format(longname=lname)
+
+ for index, line in enumerate(chart['lines']):
+ chart['lines'][index][0] = line[0].format(shortname=sname)
+
+ order.append(cname)
+ charts[cname] = chart
+
+ for key, value in PER_THREAD_STAT_MAP.items():
+ statmap[key.format(shortname=sname)] = (value[0].format(shortname=sname), value[1])
+
+ return (charts, order, statmap)
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ # The unbound control protocol is always TLS encapsulated
+ # unless it's used over a UNIX socket, so enable TLS _before_
+ # doing the normal SocketService initialization.
+ configuration['tls'] = True
+        self.port = 8953
+ SocketService.__init__(self, configuration, name)
+ self.ext = self.configuration.get('extended', None)
+ self.ubconf = self.configuration.get('ubconf', None)
+ self.perthread = self.configuration.get('per_thread', False)
+ self.threads = None
+ self.order = deepcopy(ORDER)
+ self.definitions = deepcopy(CHARTS)
+ self.request = 'UBCT1 stats\n'
+ self.statmap = deepcopy(STAT_MAP)
+ self._parse_config()
+ self._auto_config()
+ self.debug('Extended stats: {0}'.format(self.ext))
+ self.debug('Per-thread stats: {0}'.format(self.perthread))
+ if self.ext:
+ self.order = self.order + EXTENDED_ORDER
+ self.definitions.update(EXTENDED_CHARTS)
+ if self.unix_socket:
+ self.debug('Using unix socket: {0}'.format(self.unix_socket))
+ else:
+ self.debug('Connecting to: {0}:{1}'.format(self.host, self.port))
+ self.debug('Using key: {0}'.format(self.key))
+ self.debug('Using certificate: {0}'.format(self.cert))
+
+ def _auto_config(self):
+ if self.ubconf and os.access(self.ubconf, os.R_OK):
+ self.debug('Unbound config: {0}'.format(self.ubconf))
+ conf = YamlOrderedLoader.load_config_from_file(self.ubconf)[0]
+ if self.ext is None:
+ if 'extended-statistics' in conf['server']:
+ self.ext = conf['server']['extended-statistics']
+ if 'remote-control' in conf:
+ if conf['remote-control'].get('control-use-cert', False):
+ self.key = self.key or conf['remote-control'].get('control-key-file')
+ self.cert = self.cert or conf['remote-control'].get('control-cert-file')
+ self.port = self.port or conf['remote-control'].get('control-port')
+ else:
+ self.unix_socket = self.unix_socket or conf['remote-control'].get('control-interface')
+ else:
+ self.debug('Unbound configuration not found.')
+ if not self.key:
+ self.key = '/etc/unbound/unbound_control.key'
+ if not self.cert:
+ self.cert = '/etc/unbound/unbound_control.pem'
+ if not self.port:
+ self.port = 8953
+
+ def _generate_perthread_charts(self):
+ tmporder = list()
+ for thread in range(0, self.threads):
+ charts, order, statmap = _get_perthread_info(thread)
+ tmporder.extend(order)
+ self.definitions.update(charts)
+ self.statmap.update(statmap)
+ self.order.extend(sorted(tmporder))
+
+ def check(self):
+ # Check if authentication is working.
+ self._connect()
+ result = bool(self._sock)
+ self._disconnect()
+ # If auth works, and we need per-thread charts, query the server
+ # to see how many threads it's using. This somewhat abuses the
+ # SocketService API to get the data we need.
+ if result and self.perthread:
+ tmp = self.request
+ if sys.version_info[0] < 3:
+ self.request = 'UBCT1 status\n'
+ else:
+ self.request = b'UBCT1 status\n'
+ raw = self._get_raw_data()
+ for line in raw.splitlines():
+ if line.startswith('threads'):
+ self.threads = int(line.split()[1])
+ self._generate_perthread_charts()
+ break
+ if self.threads is None:
+ self.info('Unable to auto-detect thread counts, disabling per-thread stats.')
+ self.perthread = False
+ self.request = tmp
+ return result
+
+ @staticmethod
+ def _check_raw_data(data):
+ # The server will close the connection when it's done sending
+ # data, so just keep looping until that happens.
+ return False
+
+ def _get_data(self):
+ raw = self._get_raw_data()
+ data = dict()
+ tmp = dict()
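+        # the server reports one stat per line as 'name=value'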
+ for line in raw.splitlines():
+ stat = line.split('=')
+ tmp[stat[0]] = stat[1]
+ for item in self.statmap:
+ if item in tmp:
+ data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1]
+ return data
diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf
new file mode 100644
index 0000000..6856136
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for unbound
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, unbound also supports the following:
+#
+# host: localhost # The host to connect to.
+#     port: 8953              # What port to use (defaults to 8953)
+# socket: /path/to/socket # A path to a UNIX socket to use instead
+# # of a TCP connection
+# tls_key_file: /path/to/key # The key file to use for authentication
+#     tls_cert_file: /path/to/cert  # The certificate to use for authentication
+# extended: false # Whether to collect extended stats or not
+# per_thread: false # Whether to show charts for per-thread stats
+#
+# In addition to the above, you can set the following to try and
+# auto-detect most settings based on the unbound configuration:
+#
+# ubconf: /etc/unbound/unbound.conf
+#
+# Note that the SSL key and certificate need to be readable by the user
+# Netdata runs as, if you're using the regular control interface.
+# If you're using a UNIX socket, that has to be readable by the netdata user.
+
+# The following should work for most users if they have unbound configured
+# correctly.
+local:
+ ubconf: /etc/unbound/unbound.conf
diff --git a/collectors/python.d.plugin/uwsgi/Makefile.inc b/collectors/python.d.plugin/uwsgi/Makefile.inc
new file mode 100644
index 0000000..75d96de
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += uwsgi/uwsgi.chart.py
+dist_pythonconfig_DATA += uwsgi/uwsgi.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += uwsgi/README.md uwsgi/Makefile.inc
+
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
new file mode 100644
index 0000000..9d455cf
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -0,0 +1,39 @@
+# uwsgi
+
+Module monitors uWSGI performance metrics.
+
+https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
+
+Chart lines are created dynamically based on how many workers there are.
+
+The following charts are drawn:
+
+1. **Requests**
+ * requests per second
+ * transmitted data
+ * average request time
+
+2. **Memory**
+ * rss
+ * vsz
+
+3. **Exceptions**
+4. **Harakiris**
+5. **Respawns**
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+```
+
+When no configuration file is found, the module tries to connect to the TCP/IP socket `localhost:1717`.
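+
+The stats server must be enabled on the uWSGI side, e.g. in `uwsgi.ini` (a
+minimal sketch; use a path such as `/tmp/stats.socket` instead of an
+address to expose a UNIX socket):
+
+```ini
+[uwsgi]
+stats = 127.0.0.1:1717
+```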
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fuwsgi%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
new file mode 100644
index 0000000..511b770
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Description: uwsgi netdata python.d module
+# Author: Robbert Segeren (robbert-ef)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+from copy import deepcopy
+from bases.FrameworkServices.SocketService import SocketService
+
+
+ORDER = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+ 'exceptions',
+ 'harakiri',
+ 'respawn',
+]
+
+DYNAMIC_CHARTS = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+]
+
+# NOTE: lines are created dynamically in `check()` method
+CHARTS = {
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'uwsgi.requests', 'stacked'],
+ 'lines': [
+ ['requests', 'requests', 'incremental']
+ ]
+ },
+ 'tx': {
+ 'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'],
+ 'lines': [
+ ['tx', 'tx', 'incremental']
+ ]
+ },
+ 'avg_rt': {
+ 'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'],
+ 'lines': [
+ ['avg_rt', 'avg_rt', 'absolute']
+ ]
+ },
+ 'memory_rss': {
+ 'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'],
+ 'lines': [
+ ['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'memory_vsz': {
+ 'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
+ 'lines': [
+ ['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'exceptions': {
+ 'options': [None, 'Exceptions', 'exceptions', 'exceptions', 'uwsgi.exceptions', 'line'],
+ 'lines': [
+ ['exceptions', 'exceptions', 'incremental']
+ ]
+ },
+ 'harakiri': {
+ 'options': [None, 'Harakiris', 'harakiris', 'harakiris', 'uwsgi.harakiris', 'line'],
+ 'lines': [
+ ['harakiri_count', 'harakiris', 'incremental']
+ ]
+ },
+ 'respawn': {
+ 'options': [None, 'Respawns', 'respawns', 'respawns', 'uwsgi.respawns', 'line'],
+ 'lines': [
+ ['respawn_count', 'respawns', 'incremental']
+ ]
+ },
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.url = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 1717)
+ # Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time
+ for chart in DYNAMIC_CHARTS:
+ self.definitions[chart]['lines'] = []
+ self.last_result = {}
+ self.workers = []
+
+ def read_data(self):
+ """
+ Read data from socket and parse as JSON.
+ :return: (dict) stats
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ try:
+ return json.loads(raw_data)
+ except ValueError as err:
+ self.error(err)
+ return None
+
+ def check(self):
+ """
+ Parse configuration and check if we can read data.
+ :return: boolean
+ """
+ self._parse_config()
+ return bool(self.read_data())
+
+ def add_worker_dimensions(self, key):
+ """
+ Helper to add dimensions for a worker.
+ :param key: (int or str) worker identifier
+ :return:
+ """
+ for chart in DYNAMIC_CHARTS:
+ for line in CHARTS[chart]['lines']:
+ dimension_id = '{}_{}'.format(line[0], key)
+ dimension_name = str(key)
+
+ dimension = [dimension_id, dimension_name] + line[2:]
+ self.charts[chart].add_dimension(dimension)
+
+ @staticmethod
+ def _check_raw_data(data):
+ # The server will close the connection when it's done sending
+ # data, so just keep looping until that happens.
+ return False
+
+ def _get_data(self):
+ """
+ Read data from socket
+ :return: dict
+ """
+ stats = self.read_data()
+ if not stats:
+ return None
+
+ result = {
+ 'exceptions': 0,
+ 'harakiri_count': 0,
+ 'respawn_count': 0,
+ }
+
+ for worker in stats['workers']:
+ key = worker['pid']
+
+ # Add dimensions for new workers
+ if key not in self.workers:
+ self.add_worker_dimensions(key)
+ self.workers.append(key)
+
+ result['requests_{}'.format(key)] = worker['requests']
+ result['tx_{}'.format(key)] = worker['tx']
+ result['avg_rt_{}'.format(key)] = worker['avg_rt']
+
+ # avg_rt is not reset by uwsgi, so reset here
+ if self.last_result.get('requests_{}'.format(key)) == worker['requests']:
+ result['avg_rt_{}'.format(key)] = 0
+
+ result['memory_rss_{}'.format(key)] = worker['rss']
+ result['memory_vsz_{}'.format(key)] = worker['vsz']
+
+ result['exceptions'] += worker['exceptions']
+ result['harakiri_count'] += worker['harakiri_count']
+ result['respawn_count'] += worker['respawn_count']
+
+ self.last_result = result
+ return result
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf
new file mode 100644
index 0000000..7d09e73
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.conf
@@ -0,0 +1,92 @@
+# netdata python.d.plugin configuration for uwsgi
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, uwsgi also supports the following:
+#
+# socket: 'path/to/uwsgistats.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 1717
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 1717
diff --git a/collectors/python.d.plugin/varnish/Makefile.inc b/collectors/python.d.plugin/varnish/Makefile.inc
new file mode 100644
index 0000000..2469b05
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += varnish/varnish.chart.py
+dist_pythonconfig_DATA += varnish/varnish.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += varnish/README.md varnish/Makefile.inc
+
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
new file mode 100644
index 0000000..44d64ef
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -0,0 +1,77 @@
+# varnish
+
+Module uses the `varnishstat` command to provide varnish cache statistics.
+
+It produces:
+
+1. **Connections Statistics** in connections/s
+ * accepted
+ * dropped
+
+2. **Client Requests** in requests/s
+ * received
+
+3. **All History Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+4. **Current Poll Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+5. **Expired Objects** in expired/s
+ * objects
+
+6. **Least Recently Used Nuked Objects** in nuked/s
+ * objects
+
+
+7. **Number Of Threads In All Pools** in threads
+ * threads
+
+8. **Threads Statistics** in threads/s
+ * created
+ * failed
+ * limited
+
+9. **Current Queue Length** in requests
+ * in queue
+
+10. **Backend Connections Statistics** in connections/s
+ * successful
+ * unhealthy
+ * reused
+ * closed
+ * recycled
+ * failed
+
+11. **Requests To The Backend** in requests/s
+ * sent
+
+12. **ESI Statistics** in problems/s
+ * errors
+ * warnings
+
+13. **Memory Usage** in MiB
+ * free
+ * allocated
+
+14. **Uptime** in seconds
+ * uptime
+
+
+### configuration
+
+Only one parameter is supported:
+
+```yaml
+instance_name: 'name'
+```
+
+The name of the varnishd instance to get stats from. If not specified, the host name is used.
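+
+For example, a minimal job definition in `varnish.conf` might look like this (the instance name `mycache` is illustrative):
+
+```yaml
+local:
+  instance_name: 'mycache'
+```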
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fvarnish%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
new file mode 100644
index 0000000..da67815
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -0,0 +1,262 @@
+# -*- coding: utf-8 -*-
+# Description: varnish netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.collection import find_binary
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+
+ORDER = [
+ 'session_connections',
+ 'client_requests',
+ 'all_time_hit_rate',
+ 'current_poll_hit_rate',
+ 'cached_objects_expired',
+ 'cached_objects_nuked',
+ 'threads_total',
+ 'threads_statistics',
+ 'threads_queue_len',
+ 'backend_connections',
+ 'backend_requests',
+ 'esi_statistics',
+ 'memory_usage',
+ 'uptime'
+]
+
+CHARTS = {
+ 'session_connections': {
+ 'options': [None, 'Connections Statistics', 'connections/s',
+ 'client metrics', 'varnish.session_connection', 'line'],
+ 'lines': [
+ ['sess_conn', 'accepted', 'incremental'],
+ ['sess_dropped', 'dropped', 'incremental']
+ ]
+ },
+ 'client_requests': {
+ 'options': [None, 'Client Requests', 'requests/s',
+ 'client metrics', 'varnish.client_requests', 'line'],
+ 'lines': [
+ ['client_req', 'received', 'incremental']
+ ]
+ },
+ 'all_time_hit_rate': {
+ 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
+ 'varnish.all_time_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-absolute-row'],
+ ['cache_miss', 'miss', 'percentage-of-absolute-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
+ },
+ 'current_poll_hit_rate': {
+ 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
+ 'varnish.current_poll_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-incremental-row'],
+ ['cache_miss', 'miss', 'percentage-of-incremental-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row']
+ ]
+ },
+ 'cached_objects_expired': {
+ 'options': [None, 'Expired Objects', 'expired/s', 'cache performance',
+ 'varnish.cached_objects_expired', 'line'],
+ 'lines': [
+ ['n_expired', 'objects', 'incremental']
+ ]
+ },
+ 'cached_objects_nuked': {
+ 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance',
+ 'varnish.cached_objects_nuked', 'line'],
+ 'lines': [
+ ['n_lru_nuked', 'objects', 'incremental']
+ ]
+ },
+ 'threads_total': {
+ 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics',
+ 'varnish.threads_total', 'line'],
+ 'lines': [
+ ['threads', None, 'absolute']
+ ]
+ },
+ 'threads_statistics': {
+ 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics',
+ 'varnish.threads_statistics', 'line'],
+ 'lines': [
+ ['threads_created', 'created', 'incremental'],
+ ['threads_failed', 'failed', 'incremental'],
+ ['threads_limited', 'limited', 'incremental']
+ ]
+ },
+ 'threads_queue_len': {
+ 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics',
+ 'varnish.threads_queue_len', 'line'],
+ 'lines': [
+ ['thread_queue_len', 'in queue']
+ ]
+ },
+ 'backend_connections': {
+ 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics',
+ 'varnish.backend_connections', 'line'],
+ 'lines': [
+ ['backend_conn', 'successful', 'incremental'],
+ ['backend_unhealthy', 'unhealthy', 'incremental'],
+ ['backend_reuse', 'reused', 'incremental'],
+ ['backend_toolate', 'closed', 'incremental'],
+ ['backend_recycle', 'recycled', 'incremental'],
+ ['backend_fail', 'failed', 'incremental']
+ ]
+ },
+ 'backend_requests': {
+ 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
+ 'varnish.backend_requests', 'line'],
+ 'lines': [
+ ['backend_req', 'sent', 'incremental']
+ ]
+ },
+ 'esi_statistics': {
+ 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
+ 'lines': [
+ ['esi_errors', 'errors', 'incremental'],
+ ['esi_warnings', 'warnings', 'incremental']
+ ]
+ },
+ 'memory_usage': {
+ 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
+ 'lines': [
+ ['memory_free', 'free', 'absolute', 1, 1 << 20],
+ ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
+ },
+ 'uptime': {
+ 'lines': [
+ ['uptime', None, 'absolute']
+ ],
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
+ }
+}
+
+VARNISHSTAT = 'varnishstat'
+
+
+class Parser:
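+ # varnishstat backend lines differ between varnish versions, so two patterns are tried (see init() below)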
+ _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
+ _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)')
+ _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
+
+ def __init__(self):
+ self.re_default = None
+ self.re_backend = None
+
+ def init(self, data):
+ data = ''.join(data)
+ parsed_main = Parser._default.findall(data)
+ if parsed_main:
+ self.re_default = Parser._default
+
+ parsed_backend = Parser._backend_new.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_new
+ else:
+ parsed_backend = Parser._backend_old.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_old
+
+ def server_stats(self, data):
+ return self.re_default.findall(''.join(data))
+
+ def backend_stats(self, data):
+ return self.re_backend.findall(''.join(data))
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.instance_name = configuration.get('instance_name')
+ self.parser = Parser()
+ self.command = None
+
+ def create_command(self):
+ varnishstat = find_binary(VARNISHSTAT)
+
+ if not varnishstat:
+ self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
+ return False
+
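+ # varnishstat flags: -1 print stats once and exit, -n select the instance, -t 1 limit the wait to 1 second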
+ if self.instance_name:
+ self.command = [varnishstat, '-1', '-n', self.instance_name, '-t', '1']
+ else:
+ self.command = [varnishstat, '-1', '-t', '1']
+ return True
+
+ def check(self):
+ if not self.create_command():
+ return False
+
+ # STDOUT is not empty
+ reply = self._get_raw_data()
+ if not reply:
+ self.error("No output from 'varnishstat'. Is it running? Not enough privileges?")
+ return False
+
+ self.parser.init(reply)
+
+ # Output is parsable
+ if not self.parser.re_default:
+ self.error('Cannot parse the output...')
+ return False
+
+ if self.parser.re_backend:
+ backends = [b[0] for b in self.parser.backend_stats(reply)[::2]]
+ self.create_backends_charts(backends)
+ return True
+
+ def get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ data = dict()
+ server_stats = self.parser.server_stats(raw)
+ if not server_stats:
+ return None
+
+ if self.parser.re_backend:
+ backend_stats = self.parser.backend_stats(raw)
+ data.update(dict(('_'.join([name, param]), value) for name, param, value in backend_stats))
+
+ data.update(dict((param, value) for _, param, value in server_stats))
+
+ # varnish 5 uses default.g_bytes and default.g_space
+ data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
+ data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
+
+ return data
+
+ def create_backends_charts(self, backends):
+ for backend in backends:
+ chart_name = ''.join([backend, '_response_statistics'])
+ title = 'Backend "{0}"'.format(backend.capitalize())
+ hdr_bytes = ''.join([backend, '_beresp_hdrbytes'])
+ body_bytes = ''.join([backend, '_beresp_bodybytes'])
+
+ chart = {
+ chart_name:
+ {
+ 'options': [None, title, 'kilobits/s', 'backend response statistics',
+ 'varnish.backend', 'area'],
+ 'lines': [
+ [hdr_bytes, 'header', 'incremental', 8, 1000],
+ [body_bytes, 'body', 'incremental', -8, 1000]
+ ]
+ }
+ }
+
+ self.order.insert(0, chart_name)
+ self.definitions.update(chart)
diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
new file mode 100644
index 0000000..54bfe4d
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for varnish
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, varnish also supports the following:
+#
+# instance_name: 'name' # the name of the varnishd instance to get stats from. If not specified, the host name is used.
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/w1sensor/Makefile.inc b/collectors/python.d.plugin/w1sensor/Makefile.inc
new file mode 100644
index 0000000..bddf146
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += w1sensor/w1sensor.chart.py
+dist_pythonconfig_DATA += w1sensor/w1sensor.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += w1sensor/README.md w1sensor/Makefile.inc
+
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
new file mode 100644
index 0000000..94717c8
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -0,0 +1,15 @@
+# w1sensor
+
+Data from 1-Wire sensors.
+On Linux these are supported by the `wire`, `w1_gpio`, and `w1_therm` kernel modules.
+Currently temperature sensors are supported and automatically detected.
+
+Charts are created dynamically based on the number of detected sensors.
+
+### configuration
+
+For detailed configuration information please read the [`w1sensor.conf`](w1sensor.conf) file.
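+
+For example, to give your sensors human readable names (a minimal sketch; the 1-Wire ids are illustrative):
+
+```yaml
+w1sensor:
+  name_00000022276e: 'Machine room'
+  name_00000022298f: 'Rack 12'
+```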
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fw1sensor%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
new file mode 100644
index 0000000..e50312f
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Description: 1-wire temperature monitor netdata python.d module
+# Author: Diomidis Spinellis <http://www.spinellis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+# Location where 1-Wire devices can be found
+W1_DIR = '/sys/bus/w1/devices/'
+
+# Lines matching the following regular expression contain a temperature value
+RE_TEMP = re.compile(r' t=(\d+)')
+
+ORDER = [
+ 'temp',
+]
+
+CHARTS = {
+ 'temp': {
+ 'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'],
+ 'lines': []
+ }
+}
+
+# Known and supported family members
+# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c
+THERM_FAMILY = {
+ '10': 'W1_THERM_DS18S20',
+ '22': 'W1_THERM_DS1822',
+ '28': 'W1_THERM_DS18B20',
+ '3b': 'W1_THERM_DS1825',
+ '42': 'W1_THERM_DS28EA00',
+}
+
+
+class Service(SimpleService):
+ """Provide netdata service for 1-Wire sensors"""
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.probes = []
+
+ def check(self):
+ """Auto-detect available 1-Wire sensors, setting line definitions
+ and probes to be monitored."""
+ try:
+ file_names = os.listdir(W1_DIR)
+ except OSError as err:
+ self.error(err)
+ return False
+
+ lines = []
+ for file_name in file_names:
+ if file_name[2] != '-':
+ continue
+ if not file_name[0:2] in THERM_FAMILY:
+ continue
+
+ self.probes.append(file_name)
+ identifier = file_name[3:]
+ name = identifier
+ config_name = self.configuration.get('name_' + identifier)
+ if config_name:
+ name = config_name
+ lines.append(['w1sensor_temp_' + identifier, name, 'absolute',
+ 1, 10])
+ self.definitions['temp']['lines'] = lines
+ return len(self.probes) > 0
+
+ def get_data(self):
+ """Return data read from sensors."""
+ data = dict()
+
+ for file_name in self.probes:
+ file_path = W1_DIR + file_name + '/w1_slave'
+ identifier = file_name[3:]
+ try:
+ with open(file_path, 'r') as device_file:
+ for line in device_file:
+ matched = RE_TEMP.search(line)
+ if matched:
+ # Round to one decimal digit to filter out noise
+ value = round(int(matched.group(1)) / 1000., 1)
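+ # store the value in tenths of a degree; the chart divisor of 10 restores the decimal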
+ value = int(value * 10)
+ data['w1sensor_temp_' + identifier] = value
+ except (OSError, IOError) as err:
+ self.error(err)
+ continue
+ return data or None
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
new file mode 100644
index 0000000..1727100
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for w1sensor
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 5 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, w1sensor also supports the following:
+#
+# name_<1-Wire id>: '<human readable name>'
+# This allows associating a human readable name with a sensor's 1-Wire
+# identifier. Example:
+# name_00000022276e: 'Machine room'
+# name_00000022298f: 'Rack 12'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/web_log/Makefile.inc b/collectors/python.d.plugin/web_log/Makefile.inc
new file mode 100644
index 0000000..8931159
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += web_log/web_log.chart.py
+dist_pythonconfig_DATA += web_log/web_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += web_log/README.md web_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
new file mode 100644
index 0000000..176551c
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -0,0 +1,203 @@
+# web_log
+
+## Motivation
+
+Web server log files have existed for more than 20 years. All web servers of all kinds, from all vendors, [since the time NCSA httpd was powering the web](https://en.wikipedia.org/wiki/NCSA_HTTPd), produce log files, saving in real-time all accesses to web sites and APIs.
+
+Yet, after the appearance of Google Analytics and similar services, and the recent rise of APM (Application Performance Monitoring) with sophisticated time-series databases that collect and analyze metrics at the application level, all these web server log files are mostly just filling our disks, rotated every night without any use whatsoever.
+
+netdata turns this "useless" log file, into a powerful performance and health monitoring tool, capable of detecting, **in real-time**, most common web server problems, such as:
+
+- too many redirects (i.e. **oops!** *this should not redirect clients to itself*)
+- too many bad requests (i.e. **oops!** *a few files were not uploaded*)
+- too many internal server errors (i.e. **oops!** *this release crashes too much*)
+- an unreasonably high number of requests (i.e. **oops!** *we are under attack*)
+- an unreasonably low number of requests (i.e. **oops!** *call the network guys*)
+- unreasonably slow responses (i.e. **oops!** *the database is slow again*)
+- too few successful responses (i.e. **oops!** *help us God!*)
+
+## Usage
+
+If netdata is installed on a system running a web server, it will detect it and automatically present a series of charts, with information obtained from the web server API, like these (*these do not come from the web server log file*):
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22900686/e283f636-f237-11e6-93d2-cbdf63de150c.png)
+*[**netdata**](https://my-netdata.io/) charts based on metrics collected by querying the `nginx` API (i.e. `/stub_status`).*
+
+> [**netdata**](https://my-netdata.io/) supports `apache`, `nginx`, `lighttpd` and `tomcat`. To obtain real-time information from a web server API, the web server needs to expose it. For directions on configuring your web server, check the config file for that web server under [`/etc/netdata/python.d/`](../).
+
+## Configuration
+
+[**netdata**](https://my-netdata.io/) has a powerful `web_log` plugin, capable of incrementally parsing any number of web server log files. This plugin is automatically started with [**netdata**](https://my-netdata.io/) and comes, pre-configured, for finding web server log files on popular distributions. Its configuration is at [`/etc/netdata/python.d/web_log.conf`](web_log.conf), like this:
+
+
+```yaml
+nginx_log:
+ name : 'nginx_log'
+ path : '/var/log/nginx/access.log'
+
+apache_log:
+ name : 'apache_log'
+ path : '/var/log/apache/other_vhosts_access.log'
+ categories:
+ cacti : 'cacti.*'
+ observium : 'observium'
+```
+
+The module has preconfigured jobs for nginx, apache and gunicorn on various distros.
+You can add one such section for each of your web server log files.
+
+> **Important**<br/>Keep in mind [**netdata**](https://my-netdata.io/) runs as user `netdata`. So, make sure user `netdata` has access to the logs directory and can read the log file.
+
+## Charts
+
+Once you have all log files configured and [**netdata**](https://my-netdata.io/) restarted, **for each log file** you will get a section at the [**netdata**](https://my-netdata.io/) dashboard, with the following charts.
+
+### responses by status
+
+In this chart we tried to provide a meaningful status for all responses. So:
+
+- `success` counts all the valid responses (i.e. `1xx` informational, `2xx` successful and `304` not modified).
+- `error` are `5xx` internal server errors. These are very bad, they mean your web site or API is facing difficulties.
+- `redirect` are `3xx` responses, except `304`. All `3xx` are redirects, but `304` means "not modified" - it tells the browsers the content they already have is still valid and can be used as-is. So, we decided to account it as a successful response.
+- `bad` are bad requests that cannot be served.
+- `other` are all the other, non-standard, types of responses.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902194/ea0affc6-f23c-11e6-85f1-a4951dd4bb40.png)
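+
+The categorization logic is roughly the following (a minimal sketch, not the module's actual code):
+
+```python
+def categorize(code):
+    """Map an HTTP status code (int) to one of the chart's statuses."""
+    if 100 <= code < 300 or code == 304:
+        return 'success'
+    if 300 <= code < 400:
+        return 'redirect'
+    if 400 <= code < 500:
+        return 'bad'
+    if 500 <= code < 600:
+        return 'error'
+    return 'other'
+```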
+
+### Responses by type
+
+Then, we group all responses by type:
+**Response by type** requests/s
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22901883/dea7d33a-f23b-11e6-960d-00a913b58936.png)
+
+### Responses by code family
+
+Here we show the response codes grouped by family.
+
+**Response by code family** requests/s
+ * 1xx (informational)
+ * 2xx (successful)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 5xx (internal server errors)
+ * other (non-standard responses)
+ * unmatched (the lines in the log file that are not matched)
+
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22901965/1a5d84ba-f23c-11e6-9d38-3deebcc8b879.png)
+
+>**Important**<br/>If your application is using hundreds of non-standard response codes, your browser may become slow while viewing this chart, so we have added a configuration [option to disable this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L63).
+
+### Detailed Response Codes
+
+Number of responses for each response code individually (requests/s)
+
+### bandwidth
+
+This is a nice view of the traffic the web server is receiving and sending.
+
+What is important to know for this chart is that the bandwidth used for each request and response is accounted at the time the log is written. Since [**netdata**](https://my-netdata.io/) refreshes this chart every single second, you may see unrealistic spikes if the size of the requests or responses is too big. The reason is simple: a response may have needed 1 minute to be completed, but all the bandwidth used during that minute for the specific response will be accounted at the second the log line is written.
+
+As the legend on the chart suggests, you can use FireQoS to set up QoS on the web server ports and IPs to accurately measure the bandwidth the web server is using. Actually, [there may be a few more reasons to install QoS on your servers](../../tc.plugin/#tcplugin)...
+
+**Bandwidth** KB/s
+ * received (bandwidth of requests)
+ * sent (bandwidth of responses)
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902266/245141d6-f23d-11e6-90f9-98729733e0da.png)
+
+> **Important**<br/>Most web servers do not log the request size by default.<br/>So, [unless you have configured your web server to log the size of requests](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), the `received` dimension will be always zero.
+
+### timings
+
+[**netdata**](https://my-netdata.io/) will also render the `minimum`, `average` and `maximum` time the web server needed to respond to requests.
+
+Keep in mind most web servers' timings start at the reception of the full request and end at the dispatch of the last byte of the response. So, they include network latencies of responses, but they do not include network latencies of requests.
+
+**Timings** ms (request processing time)
+ * min (the fastest response)
+ * max (the slowest response)
+ * average (the average response time)
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902283/369e3f92-f23d-11e6-9359-53e5d4ecb18e.png)
+
+> **Important**<br/>Most web servers do not log timing information by default.<br/>So, [unless you have configured your web server to also log timings](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L76-L89), this chart will not exist.
+
+### URL patterns
+
+This is a very interesting chart. It is configured entirely by you.
+
+[**netdata**](https://my-netdata.io/) can map the URLs found in the log file into categories. You can define these categories, by providing names and regular expressions in `web_log.conf`.
+
+So, this configuration:
+
+```yaml
+nginx_netdata: # name the charts
+ path: '/var/log/nginx/access.log' # web server log file
+ categories:
+ badges : '^/api/v1/badge\.svg'
+ charts : '^/api/v1/(data|chart|charts)'
+ registry : '^/api/v1/registry'
+ alarms : '^/api/v1/alarm'
+ allmetrics : '^/api/v1/allmetrics'
+ api_other : '^/api/'
+ netdata_conf: '^/netdata.conf'
+ api_old : '^/(data|datasource|graph|list|all\.json)'
+```
+
+produces the following chart. The `categories` section is matched in the order given, so pay attention to the order of your patterns.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902302/4d25bf06-f23d-11e6-844d-18c0876bdc3d.png)
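+
+Internally this is a first-match-wins scan over the compiled patterns, roughly like this sketch (the category names are illustrative):
+
+```python
+import re
+
+categories = [
+    ('badges', re.compile(r'^/api/v1/badge\.svg')),
+    ('charts', re.compile(r'^/api/v1/(data|chart|charts)')),
+    ('api_other', re.compile(r'^/api/')),
+]
+
+def categorize_url(url):
+    # the first matching pattern wins, hence the order matters
+    for name, pattern in categories:
+        if pattern.search(url):
+            return name
+    return 'url_pattern_other'
+
+print(categorize_url('/api/v1/badge.svg'))  # -> badges
+```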
+
+### HTTP versions
+
+This chart breaks down requests by HTTP version used.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902323/5ee376d4-f23d-11e6-8457-157d3f438843.png)
+
+### IP versions
+
+This one provides requests per IP version used by the clients (`IPv4`, `IPv6`).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902370/7091a770-f23d-11e6-8cd2-74e9a67b1397.png)
+
+### Unique clients
+
+The last charts are about the unique IPs accessing your web server.
+
+**Current Poll Unique Client IPs** unique ips/s. This one counts the unique IPs for each data collection iteration (i.e. **unique clients per second**).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902384/835aa168-f23d-11e6-914f-cfc3f06eaff8.png)
+
+**All Time Unique Client IPs** unique ips/s. Counts the unique IPs, since the last [**netdata**](https://my-netdata.io/) restart.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/22902407/92dd27e6-f23d-11e6-900d-eede7bc08e64.png)
+
+>**Important**<br/>To provide this information the `web_log` plugin keeps in memory all the IPs seen by the web server. Although this does not require much memory, if you have a web server with several million unique client IPs, we suggest [disabling this chart](https://github.com/netdata/netdata/blob/419cd0a237275e5eeef3f92dcded84e735ee6c58/conf.d/python.d/web_log.conf#L64).
+
+
+## Alarms
+
+The magic of [**netdata**](https://my-netdata.io/) is that all metrics are collected per second, and all metrics can be used or correlated to provide real-time alarms. Out of the box, [**netdata**](https://my-netdata.io/) automatically attaches the [following alarms](../../../health/health.d/web_log.conf) to all `web_log` charts (i.e. to all log files configured, individually):
+
+alarm|description|minimum<br/>requests|warning|critical
+:-------|-------|:------:|:-----:|:------:
+`1m_redirects`|The ratio of HTTP redirects (3xx except 304) over all the requests, during the last minute.<br/>&nbsp;<br/>*Detects if the site or the web API is suffering from too many or circular redirects.*<br/>&nbsp;<br/>(i.e. **oops!** *this should not redirect clients to itself*)|120/min|&gt; 20%|&gt; 30%
+`1m_bad_requests`|The ratio of HTTP bad requests (4xx) over all the requests, during the last minute.<br/>&nbsp;<br/>*Detects if the site or the web API is receiving too many bad requests, including `404`, not found.*<br/>&nbsp;<br/>(i.e. **oops!** *a few files were not uploaded*)|120/min|&gt; 30%|&gt; 50%
+`1m_internal_errors`|The ratio of HTTP internal server errors (5xx), over all the requests, during the last minute.<br/>&nbsp;<br/>*Detects if the site is facing difficulties to serve requests.*<br/>&nbsp;<br/>(i.e. **oops!** *this release crashes too much*)|120/min|&gt; 2%|&gt; 5%
+`5m_requests_ratio`|The percentage of successful web requests of the last 5 minutes, compared with the previous 5 minutes.<br/>&nbsp;<br/>*Detects if the site or the web API is suddenly getting too many or too few requests.*<br/>&nbsp;<br/>(i.e. too many = **oops!** *we are under attack*)<br/>(i.e. too few = **oops!** *call the network guys*)|120/5min|&gt; double or &lt; half|&gt; 4x or &lt; 1/4x
+`web_slow`|The average time to respond to requests, over the last 1 minute, compared to the average of last 10 minutes.<br/>&nbsp;<br/>*Detects if the site or the web API is suddenly a lot slower.*<br/>&nbsp;<br/>(i.e. **oops!** *the database is slow again*)|120/min|&gt; 2x|&gt; 4x
+`1m_successful`|The ratio of successful HTTP responses (1xx, 2xx, 304) over all the requests, during the last minute.<br/>&nbsp;<br/>*Detects if the site or the web API is performing within limits.*<br/>&nbsp;<br/>(i.e. **oops!** *help us God!*)|120/min|&lt; 85%|&lt; 75%
+
+The column `minimum requests` states the minimum number of requests required for the alarm to be evaluated. We found that when the site is receiving requests above this rate, these alarms are pretty accurate (i.e. no false-positives).
+
+[**netdata**](https://my-netdata.io/) alarms are user-configurable. Sample config files can be found under the `health/health.d` directory of the netdata GitHub repository. So, even [`web_log` alarms can be adapted to your needs](../../../health/health.d/web_log.conf).
+
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fweb_log%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/collectors/python.d.plugin/web_log/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
new file mode 100644
index 0000000..9927904
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
@@ -0,0 +1,1196 @@
+# -*- coding: utf-8 -*-
+# Description: web log netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import bisect
+import re
+import os
+
+from collections import namedtuple, defaultdict
+from copy import deepcopy
+
+try:
+ from itertools import filterfalse
+except ImportError:
+ from itertools import ifilter as filter
+ from itertools import ifilterfalse as filterfalse
+
+try:
+ from sys import maxint
+except ImportError:
+ from sys import maxsize as maxint
+
+from bases.collection import read_last_line
+from bases.FrameworkServices.LogService import LogService
+
+
+ORDER_APACHE_CACHE = [
+ 'apache_cache',
+]
+
+ORDER_WEB = [
+ 'response_statuses',
+ 'response_codes',
+ 'bandwidth',
+ 'response_time',
+ 'response_time_hist',
+ 'response_time_upstream',
+ 'response_time_upstream_hist',
+ 'requests_per_url',
+ 'requests_per_user_defined',
+ 'http_method',
+ 'vhost',
+ 'port',
+ 'http_version',
+ 'requests_per_ipproto',
+ 'clients',
+ 'clients_all'
+]
+
+ORDER_SQUID = [
+ 'squid_response_statuses',
+ 'squid_response_codes',
+ 'squid_detailed_response_codes',
+ 'squid_method',
+ 'squid_mime_type',
+ 'squid_hier_code',
+ 'squid_transport_methods',
+ 'squid_transport_errors',
+ 'squid_code',
+ 'squid_handling_opts',
+ 'squid_object_types',
+ 'squid_cache_events',
+ 'squid_bytes',
+ 'squid_duration',
+ 'squid_clients',
+ 'squid_clients_all'
+]
+
+CHARTS_WEB = {
+ 'response_codes': {
+ 'options': [None, 'Response Codes', 'requests/s', 'responses', 'web_log.response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['0xx', 'other', 'incremental'],
+ ['unmatched', None, 'incremental']
+ ]
+ },
+ 'bandwidth': {
+ 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'web_log.bandwidth', 'area'],
+ 'lines': [
+ ['resp_length', 'received', 'incremental', 8, 1000],
+ ['bytes_sent', 'sent', 'incremental', -8, 1000]
+ ]
+ },
+ 'response_time': {
+ 'options': [None, 'Processing Time', 'milliseconds', 'timings', 'web_log.response_time', 'area'],
+ 'lines': [
+ ['resp_time_min', 'min', 'incremental', 1, 1000],
+ ['resp_time_max', 'max', 'incremental', 1, 1000],
+ ['resp_time_avg', 'avg', 'incremental', 1, 1000]
+ ]
+ },
+ 'response_time_hist': {
+ 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'],
+ 'lines': []
+ },
+ 'response_time_upstream': {
+ 'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
+ 'web_log.response_time_upstream', 'area'],
+ 'lines': [
+ ['resp_time_upstream_min', 'min', 'incremental', 1, 1000],
+ ['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
+ ['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
+ ]
+ },
+ 'response_time_upstream_hist': {
+ 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings',
+ 'web_log.response_time_upstream_hist', 'line'],
+ 'lines': []
+ },
+ 'clients': {
+ 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
+ 'lines': [
+ ['unique_cur_ipv4', 'ipv4', 'incremental', 1, 1],
+ ['unique_cur_ipv6', 'ipv6', 'incremental', 1, 1]
+ ]
+ },
+ 'clients_all': {
+ 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'clients', 'web_log.clients_all', 'stacked'],
+ 'lines': [
+ ['unique_tot_ipv4', 'ipv4', 'absolute', 1, 1],
+ ['unique_tot_ipv6', 'ipv6', 'absolute', 1, 1]
+ ]
+ },
+ 'http_method': {
+ 'options': [None, 'Requests Per HTTP Method', 'requests/s', 'http methods', 'web_log.http_method', 'stacked'],
+ 'lines': [
+ ['GET', 'GET', 'incremental', 1, 1]
+ ]
+ },
+ 'http_version': {
+ 'options': [None, 'Requests Per HTTP Version', 'requests/s', 'http versions',
+ 'web_log.http_version', 'stacked'],
+ 'lines': []
+ },
+ 'requests_per_ipproto': {
+ 'options': [None, 'Requests Per IP Protocol', 'requests/s', 'ip protocols', 'web_log.requests_per_ipproto',
+ 'stacked'],
+ 'lines': [
+ ['req_ipv4', 'ipv4', 'incremental', 1, 1],
+ ['req_ipv6', 'ipv6', 'incremental', 1, 1]
+ ]
+ },
+ 'response_statuses': {
+ 'options': [None, 'Response Statuses', 'requests/s', 'responses', 'web_log.response_statuses', 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental', 1, 1],
+ ['server_errors', 'error', 'incremental', 1, 1],
+ ['redirects', 'redirect', 'incremental', 1, 1],
+ ['bad_requests', 'bad', 'incremental', 1, 1],
+ ['other_requests', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'requests_per_url': {
+ 'options': [None, 'Requests Per Url', 'requests/s', 'urls', 'web_log.requests_per_url', 'stacked'],
+ 'lines': [
+ ['url_pattern_other', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'requests_per_user_defined': {
+ 'options': [None, 'Requests Per User Defined Pattern', 'requests/s', 'user defined',
+ 'web_log.requests_per_user_defined', 'stacked'],
+ 'lines': [
+ ['user_pattern_other', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'port': {
+ 'options': [None, 'Requests Per Port', 'requests/s', 'port', 'web_log.port', 'stacked'],
+ 'lines': [
+ ['port_80', 'http', 'incremental', 1, 1],
+ ['port_443', 'https', 'incremental', 1, 1]
+ ]
+ },
+ 'vhost': {
+ 'options': [None, 'Requests Per Vhost', 'requests/s', 'vhost', 'web_log.vhost', 'stacked'],
+ 'lines': []
+ }
+}
+
+CHARTS_APACHE_CACHE = {
+ 'apache_cache': {
+ 'options': [None, 'Apache Cached Responses', 'percentage', 'cached', 'web_log.apache_cache_cache',
+ 'stacked'],
+ 'lines': [
+ ['hit', 'cache', 'percentage-of-absolute-row'],
+ ['miss', None, 'percentage-of-absolute-row'],
+ ['other', None, 'percentage-of-absolute-row']
+ ]
+ }
+}
+
+CHARTS_SQUID = {
+ 'squid_duration': {
+ 'options': [None, 'Elapsed Time The Transaction Busied The Cache',
+ 'milliseconds', 'squid_timings', 'web_log.squid_duration', 'area'],
+ 'lines': [
+ ['duration_min', 'min', 'incremental', 1, 1000],
+ ['duration_max', 'max', 'incremental', 1, 1000],
+ ['duration_avg', 'avg', 'incremental', 1, 1000]
+ ]
+ },
+ 'squid_bytes': {
+ 'options': [None, 'Amount Of Data Delivered To The Clients',
+ 'kilobits/s', 'squid_bandwidth', 'web_log.squid_bytes', 'area'],
+ 'lines': [
+ ['bytes', 'sent', 'incremental', 8, 1000]
+ ]
+ },
+ 'squid_response_statuses': {
+ 'options': [None, 'Response Statuses', 'responses/s', 'squid_responses', 'web_log.squid_response_statuses',
+ 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental', 1, 1],
+ ['server_errors', 'error', 'incremental', 1, 1],
+ ['redirects', 'redirect', 'incremental', 1, 1],
+ ['bad_requests', 'bad', 'incremental', 1, 1],
+ ['other_requests', 'other', 'incremental', 1, 1]
+ ]
+ },
+ 'squid_response_codes': {
+ 'options': [None, 'Response Codes', 'responses/s', 'squid_responses',
+ 'web_log.squid_response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['0xx', None, 'incremental'],
+ ['other', None, 'incremental'],
+ ['unmatched', None, 'incremental']
+ ]
+ },
+ 'squid_code': {
+ 'options': [None, 'Responses Per Cache Result Of The Request',
+ 'requests/s', 'squid_squid_cache', 'web_log.squid_code', 'stacked'],
+ 'lines': []
+ },
+ 'squid_detailed_response_codes': {
+ 'options': [None, 'Detailed Response Codes',
+ 'responses/s', 'squid_responses', 'web_log.squid_detailed_response_codes', 'stacked'],
+ 'lines': []
+ },
+ 'squid_hier_code': {
+ 'options': [None, 'Responses Per Hierarchy Code',
+ 'requests/s', 'squid_hierarchy', 'web_log.squid_hier_code', 'stacked'],
+ 'lines': []
+ },
+ 'squid_method': {
+ 'options': [None, 'Requests Per Method',
+ 'requests/s', 'squid_requests', 'web_log.squid_method', 'stacked'],
+ 'lines': []
+ },
+ 'squid_mime_type': {
+ 'options': [None, 'Requests Per MIME Type',
+ 'requests/s', 'squid_requests', 'web_log.squid_mime_type', 'stacked'],
+ 'lines': []
+ },
+ 'squid_clients': {
+ 'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients', 'stacked'],
+ 'lines': [
+ ['unique_ipv4', 'ipv4', 'incremental'],
+ ['unique_ipv6', 'ipv6', 'incremental']
+ ]
+ },
+ 'squid_clients_all': {
+ 'options': [None, 'All Time Unique Client IPs', 'unique ips', 'squid_clients',
+ 'web_log.squid_clients_all', 'stacked'],
+ 'lines': [
+ ['unique_tot_ipv4', 'ipv4', 'absolute'],
+ ['unique_tot_ipv6', 'ipv6', 'absolute']
+ ]
+ },
+ 'squid_transport_methods': {
+ 'options': [None, 'Transport Methods', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_methods', 'stacked'],
+ 'lines': []
+ },
+ 'squid_transport_errors': {
+ 'options': [None, 'Transport Errors', 'requests/s', 'squid_squid_transport',
+ 'web_log.squid_transport_errors', 'stacked'],
+ 'lines': []
+ },
+ 'squid_handling_opts': {
+ 'options': [None, 'Handling Opts', 'requests/s', 'squid_squid_cache',
+ 'web_log.squid_handling_opts', 'stacked'],
+ 'lines': []
+ },
+ 'squid_object_types': {
+ 'options': [None, 'Object Types', 'objects/s', 'squid_squid_cache',
+ 'web_log.squid_object_types', 'stacked'],
+ 'lines': []
+ },
+ 'squid_cache_events': {
+ 'options': [None, 'Cache Events', 'events/s', 'squid_squid_cache',
+ 'web_log.squid_cache_events', 'stacked'],
+ 'lines': []
+ }
+}
+
+NAMED_PATTERN = namedtuple('PATTERN', ['description', 'func'])
+
+DET_RESP_AGGR = ['', '_1xx', '_2xx', '_3xx', '_4xx', '_5xx', '_Other']
+
+SQUID_CODES = {
+ 'TCP': 'squid_transport_methods',
+ 'UDP': 'squid_transport_methods',
+ 'NONE': 'squid_transport_methods',
+ 'CLIENT': 'squid_handling_opts',
+ 'IMS': 'squid_handling_opts',
+ 'ASYNC': 'squid_handling_opts',
+ 'SWAPFAIL': 'squid_handling_opts',
+ 'REFRESH': 'squid_handling_opts',
+ 'SHARED': 'squid_handling_opts',
+ 'REPLY': 'squid_handling_opts',
+ 'NEGATIVE': 'squid_object_types',
+ 'STALE': 'squid_object_types',
+ 'OFFLINE': 'squid_object_types',
+ 'INVALID': 'squid_object_types',
+ 'FAIL': 'squid_object_types',
+ 'MODIFIED': 'squid_object_types',
+ 'UNMODIFIED': 'squid_object_types',
+ 'REDIRECT': 'squid_object_types',
+ 'HIT': 'squid_cache_events',
+ 'MEM': 'squid_cache_events',
+ 'MISS': 'squid_cache_events',
+ 'DENIED': 'squid_cache_events',
+ 'NOFETCH': 'squid_cache_events',
+ 'TUNNEL': 'squid_cache_events',
+ 'ABORTED': 'squid_transport_errors',
+ 'TIMEOUT': 'squid_transport_errors'
+}
+
+REQUEST_REGEX = re.compile(r'(?P<method>[A-Z]+) (?P<url>[^ ]+) [A-Z]+/(?P<http_version>\d(?:\.\d)?)')
+
+MIME_TYPES = ['application', 'audio', 'example', 'font', 'image', 'message', 'model', 'multipart', 'text', 'video']
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ """
+ :param configuration:
+ :param name:
+ """
+ LogService.__init__(self, configuration=configuration, name=name)
+ self.configuration = configuration
+ self.log_path = self.configuration.get('path')
+ self.job = None
+
+ def check(self):
+ """
+ :return: bool
+
+ 1. "log_path" is specified in the module configuration file
+ 2. "log_path" must be readable by netdata user and must exist
+ 3. "log_path' must not be empty. We need at least 1 line to find appropriate pattern to parse
+ 4. other checks depends on log "type"
+ """
+
+ log_type = self.configuration.get('type', 'web')
+ log_types = dict(web=Web, apache_cache=ApacheCache, squid=Squid)
+
+ if log_type not in log_types:
+ self.error('bad log type {log_type}. Supported types: {types}'.format(log_type=log_type,
+ types=log_types.keys()))
+ return False
+
+ if not self.log_path:
+ self.error('log path is not specified')
+ return False
+
+ if not (self._find_recent_log_file() and os.access(self.log_path, os.R_OK)):
+ self.error('{log_file} does not exist or is not readable'.format(log_file=self.log_path))
+ return False
+
+ if not os.path.getsize(self.log_path):
+ self.error('{log_file} is empty'.format(log_file=self.log_path))
+ return False
+
+ self.job = log_types[log_type](self)
+ if self.job.check():
+ self.order = self.job.order
+ self.definitions = self.job.definitions
+ return True
+ return False
+
+ def _get_data(self):
+ return self.job.get_data(self._get_raw_data())
+
+
+class Web:
+ def __init__(self, service):
+ self.service = service
+ self.order = ORDER_WEB[:]
+ self.definitions = deepcopy(CHARTS_WEB)
+ self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
+ self.storage = dict()
+ self.data = {
+ 'bytes_sent': 0,
+ 'resp_length': 0,
+ 'resp_time_min': 0,
+ 'resp_time_max': 0,
+ 'resp_time_avg': 0,
+ 'resp_time_upstream_min': 0,
+ 'resp_time_upstream_max': 0,
+ 'resp_time_upstream_avg': 0,
+ 'unique_cur_ipv4': 0,
+ 'unique_cur_ipv6': 0,
+ '2xx': 0,
+ '5xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '1xx': 0,
+ '0xx': 0,
+ 'unmatched': 0,
+ 'req_ipv4': 0,
+ 'req_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ 'GET': 0
+ }
+
+ def __getattr__(self, item):
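+ # delegate attribute lookups (configuration, log_path, error, info, ...) to the wrapping service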
+ return getattr(self.service, item)
+
+ def check(self):
+ last_line = read_last_line(self.log_path)
+ if not last_line:
+ return False
+ # Custom_log_format or predefined log format.
+ if self.configuration.get('custom_log_format'):
+ match_dict, error = self.find_regex_custom(last_line)
+ else:
+ match_dict, error = self.find_regex(last_line)
+
+ # "match_dict" is None if there are any problems
+ if match_dict is None:
+ self.error(error)
+ return False
+
+ self.storage['unique_all_time'] = list()
+ self.storage['url_pattern'] = check_patterns('url_pattern', self.configuration.get('categories'))
+ self.storage['user_pattern'] = check_patterns('user_pattern', self.configuration.get('user_defined'))
+
+ self.create_web_charts(match_dict) # Create charts
+ self.info('Collected data: %s' % list(match_dict.keys()))
+ return True
+
+ def create_web_charts(self, match_dict):
+ """
+ :param match_dict: dict: regex.search.groupdict(). Ex. {'address': '127.0.0.1', 'code': '200', 'method': 'GET'}
+ :return:
+ Create/remove additional charts depending on the 'match_dict' keys and configuration file options
+ """
+ if 'resp_time' not in match_dict:
+ self.order.remove('response_time')
+ self.order.remove('response_time_hist')
+ if 'resp_time_upstream' not in match_dict:
+ self.order.remove('response_time_upstream')
+ self.order.remove('response_time_upstream_hist')
+
+ # Add 'response_time_hist' and 'response_time_upstream_hist' charts if specified in the configuration
+ histogram = self.configuration.get('histogram', None)
+ if isinstance(histogram, list):
+ self.storage['bucket_index'] = histogram[:]
+ self.storage['bucket_index'].append(maxint)
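+ # the final bucket (+Inf) catches responses above the largest configured edge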
+ self.storage['buckets'] = [0] * (len(histogram) + 1)
+ self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
+ hist_lines = self.definitions['response_time_hist']['lines']
+ upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
+ for i, le in enumerate(histogram):
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
+ hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
+ upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
+
+ hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+ upstream_hist_lines.append(['response_time_upstream_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])
+ elif histogram is not None:
+ self.error('expected histogram list, but got {0}'.format(type(histogram)))
+
+ if not self.configuration.get('all_time', True):
+ self.order.remove('clients_all')
+
+ # Add 'detailed_response_codes' chart if specified in the configuration
+ if self.configuration.get('detailed_response_codes', True):
+ if self.configuration.get('detailed_response_aggregate', True):
+ codes = DET_RESP_AGGR[:1]
+ else:
+ codes = DET_RESP_AGGR[1:]
+
+ for code in codes:
+ self.order.append('detailed_response_codes%s' % code)
+ self.definitions['detailed_response_codes%s' % code] = {
+ 'options': [None, 'Detailed Response Codes %s' % code[1:], 'requests/s', 'responses',
+ 'web_log.detailed_response_codes%s' % code, 'stacked'],
+ 'lines': []
+ }
+
+ # Add 'requests_per_url' chart if specified in the configuration
+ if self.storage['url_pattern']:
+ for elem in self.storage['url_pattern']:
+ dim = [elem.description, elem.description[12:], 'incremental']
+ self.definitions['requests_per_url']['lines'].append(dim)
+ self.data[elem.description] = 0
+ self.data['url_pattern_other'] = 0
+ else:
+ self.order.remove('requests_per_url')
+
+ # Add 'requests_per_user_defined' chart if specified in the configuration
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ for elem in self.storage['user_pattern']:
+ dim = [elem.description, elem.description[13:], 'incremental']
+ self.definitions['requests_per_user_defined']['lines'].append(dim)
+ self.data[elem.description] = 0
+ self.data['user_pattern_other'] = 0
+ else:
+ self.order.remove('requests_per_user_defined')
+
+ def get_data(self, raw_data=None):
+ """
+ Parses new log lines
+ :return: dict OR None
+ None if _get_raw_data method fails.
+ In all other cases - dict.
+ """
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
+
+ unique_current = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
+
+ for line in filtered_data:
+ match = self.storage['regex'].search(line)
+ if match:
+ match_dict = match.groupdict()
+ try:
+ code = match_dict['code'][0] + 'xx'
+ self.data[code] += 1
+ except KeyError:
+ self.data['0xx'] += 1
+ # detailed response code
+ if self.configuration.get('detailed_response_codes', True):
+ self.get_data_per_response_codes_detailed(code=match_dict['code'])
+ # response statuses
+ self.get_data_per_statuses(code=match_dict['code'])
+ # requests per user defined pattern
+ if self.storage['user_pattern'] and 'user_defined' in match_dict:
+ self.get_data_per_pattern(row=match_dict['user_defined'],
+ other='user_pattern_other',
+ pattern=self.storage['user_pattern'])
+ # method, url, http version
+ self.get_data_from_request_field(match_dict=match_dict)
+ # bandwidth sent
+ bytes_sent = match_dict['bytes_sent'] if '-' not in match_dict['bytes_sent'] else 0
+ self.data['bytes_sent'] += int(bytes_sent)
+ # request processing time and bandwidth received
+ if 'resp_length' in match_dict:
+ resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0
+ self.data['resp_length'] += int(resp_length)
+ if 'resp_time' in match_dict:
+ resp_time = self.storage['func_resp_time'](float(match_dict['resp_time']))
+ get_timings(timings=timings['resp_time'], time=resp_time)
+ if 'bucket_index' in self.storage:
+ get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000)
+ if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
+ resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream']))
+ get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream)
+ if 'bucket_index' in self.storage:
+                        get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time_upstream / 1000)
+ # requests per ip proto
+ proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4'
+ self.data['req_' + proto] += 1
+ # unique clients ips
+ if self.configuration.get('all_time', True):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match_dict['address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
+ if match_dict['address'] not in unique_current:
+ self.data['unique_cur_' + proto] += 1
+ unique_current.add(match_dict['address'])
+ else:
+ self.data['unmatched'] += 1
+
+ # timings
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
+
+ # histogram
+ if 'bucket_index' in self.storage:
+ buckets = self.storage['buckets']
+ upstream_buckets = self.storage['upstream_buckets']
+ for i in range(0, len(self.storage['bucket_index'])):
+ hist_key = 'response_time_hist_%d' % i
+ upstream_hist_key = 'response_time_upstream_hist_%d' % i
+ self.data[hist_key] = buckets[i]
+ self.data[upstream_hist_key] = upstream_buckets[i]
+
+ return self.data
+
+ def find_regex(self, last_line):
+ """
+        :param last_line: str: literally the last line from the log file
+        :return: tuple where:
+          [0]: dict or None: match_dict or None
+          [1]: str: error description
+        We need to find the appropriate pattern for the current log file.
+        The logic: run a regex search on the string with every predefined
+        pattern until one matches or all of them fail.
+ """
+        # REGEX: 1. IP address 2. HTTP method 3. URL 4. Response code
+        # 5. Bytes sent 6. Response length 7. Response processing time
+ default = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)')
+
+ apache_ext_insert = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' (?P<resp_length>\d+|-)'
+ r' (?P<resp_time>\d+) ')
+
+ apache_ext_append = re.compile(r'(?P<address>[\da-f.:]+|localhost)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+|-)'
+ r' .*?'
+ r' (?P<resp_length>\d+|-)'
+ r' (?P<resp_time>\d+)'
+ r'(?: |$)')
+
+ nginx_ext_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+) ')
+
+ nginx_ext2_insert = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)'
+ r' (?P<resp_time_upstream>[\d.-]+) ')
+
+ nginx_ext_append = re.compile(r'(?P<address>[\da-f.:]+)'
+ r' -.*?"(?P<request>[^"]*)"'
+ r' (?P<code>[1-9]\d{2})'
+ r' (?P<bytes_sent>\d+)'
+ r' .*?'
+ r' (?P<resp_length>\d+)'
+ r' (?P<resp_time>\d+\.\d+)')
+
+ def func_usec(time):
+ return time
+
+ def func_sec(time):
+ return time * 1000000
+
+ r_regex = [apache_ext_insert, apache_ext_append,
+ nginx_ext2_insert, nginx_ext_insert, nginx_ext_append,
+ default]
+ r_function = [func_usec, func_usec, func_sec, func_sec, func_sec, func_usec]
+ regex_function = zip(r_regex, r_function)
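+        # the pairing normalizes response times to microseconds: apache %D
+        # already logs microseconds (func_usec), while nginx $request_time
+        # logs seconds (func_sec multiplies by 1000000)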
+
+ match_dict = dict()
+ for regex, func in regex_function:
+ match = regex.search(last_line)
+ if match:
+ self.storage['regex'] = regex
+ self.storage['func_resp_time'] = func
+ match_dict = match.groupdict()
+ break
+
+ return find_regex_return(match_dict=match_dict or None,
+ msg='Unknown log format. You need to use "custom_log_format" feature.')
+
+ def find_regex_custom(self, last_line):
+ """
+        :param last_line: str: literally the last line from the log file
+        :return: tuple where:
+          [0]: dict or None: match_dict or None
+          [1]: str: error description
+
+        We get here only if "custom_log_format" is present in the job
+        configuration. We need to make sure:
+        1. "custom_log_format" is a dict
+        2. "pattern" is in "custom_log_format" and is a <str> instance
+        3. if "time_multiplier" is in "custom_log_format", it is an <int> or <float> instance
+
+        If all parameters are ok, we need to make sure:
+        1. the pattern search succeeds
+        2. the search result contains named subgroups (?P<subgroup_name>) (= "match_dict")
+
+        If the pattern search succeeds, we need to make sure:
+        1. all mandatory keys ['address', 'code', 'bytes_sent'] are in "match_dict"
+
+        If this is True, we need to make sure:
+        1. all mandatory key values from "match_dict" have the correct format
+        ("code" is an integer, "bytes_sent" is an integer or "-", etc.)
+
+        If non-mandatory keys are in "match_dict", we need to make sure:
+        1. all non-mandatory key values ['resp_length', 'resp_time', ...] have the correct format
+        ("resp_length" is an integer or "-", "resp_time" is an integer or float)
+
+ """
+ if not hasattr(self.configuration.get('custom_log_format'), 'keys'):
+ return find_regex_return(msg='Custom log: "custom_log_format" is not a <dict>')
+
+ pattern = self.configuration.get('custom_log_format', dict()).get('pattern')
+ if not (pattern and isinstance(pattern, str)):
+ return find_regex_return(msg='Custom log: "pattern" option is not specified or type is not <str>')
+
+ resp_time_func = self.configuration.get('custom_log_format', dict()).get('time_multiplier') or 0
+
+ if not isinstance(resp_time_func, (int, float)):
+ return find_regex_return(msg='Custom log: "time_multiplier" is not an integer or a float')
+
+ try:
+ regex = re.compile(pattern)
+ except re.error as error:
+ return find_regex_return(msg='Pattern compile error: %s' % str(error))
+
+ match = regex.search(last_line)
+ if not match:
+ return find_regex_return(msg='Custom log: pattern search FAILED')
+
+ match_dict = match.groupdict() or None
+ if match_dict is None:
+ return find_regex_return(msg='Custom log: search OK but contains no named subgroups'
+ ' (you need to use ?P<subgroup_name>)')
+ mandatory_dict = {'address': r'[\w.:-]+',
+ 'code': r'[1-9]\d{2}',
+ 'bytes_sent': r'\d+|-'}
+ optional_dict = {'resp_length': r'\d+|-',
+ 'resp_time': r'[\d.]+',
+ 'resp_time_upstream': r'[\d.-]+',
+ 'method': r'[A-Z]+',
+                         'http_version': r'\d(?:\.\d)?'}
+
+ mandatory_values = set(mandatory_dict) - set(match_dict)
+ if mandatory_values:
+ return find_regex_return(msg='Custom log: search OK but some mandatory keys (%s) are missing'
+ % list(mandatory_values))
+ for key in mandatory_dict:
+ if not re.search(mandatory_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
+ optional_values = set(optional_dict) & set(match_dict)
+ for key in optional_values:
+ if not re.search(optional_dict[key], match_dict[key]):
+ return find_regex_return(msg='Custom log: can\'t parse "%s": %s'
+ % (key, match_dict[key]))
+
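+        # choose the response-time unit converter: a dotted value such as
+        # '0.002' is assumed to be seconds (default multiplier 1000000),
+        # while an integer such as '2000' is assumed to be microseconds
+        # (default multiplier 1); an explicit 'time_multiplier' overrides both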
+ dot_in_time = '.' in match_dict.get('resp_time', '')
+ if dot_in_time:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1000000)
+ else:
+ self.storage['func_resp_time'] = lambda time: time * (resp_time_func or 1)
+
+ self.storage['regex'] = regex
+ return find_regex_return(match_dict=match_dict)
+
+ def get_data_from_request_field(self, match_dict):
+ if match_dict.get('request'):
+ match_dict = REQUEST_REGEX.search(match_dict['request'])
+ if match_dict:
+ match_dict = match_dict.groupdict()
+ else:
+ return
+ # requests per url
+ if match_dict.get('url') and self.storage['url_pattern']:
+ self.get_data_per_pattern(row=match_dict['url'],
+ other='url_pattern_other',
+ pattern=self.storage['url_pattern'])
+ # requests per http method
+ if match_dict.get('method'):
+ if match_dict['method'] not in self.data:
+ self.charts['http_method'].add_dimension([match_dict['method'],
+ match_dict['method'],
+ 'incremental'])
+ self.data[match_dict['method']] = 0
+ self.data[match_dict['method']] += 1
+ # requests per http version
+ if match_dict.get('http_version'):
+ dim_id = match_dict['http_version'].replace('.', '_')
+ if dim_id not in self.data:
+ self.charts['http_version'].add_dimension([dim_id,
+ match_dict['http_version'],
+ 'incremental'])
+ self.data[dim_id] = 0
+ self.data[dim_id] += 1
+ # requests per port number
+ if match_dict.get('port'):
+ if match_dict['port'] not in self.data:
+ self.charts['port'].add_dimension([match_dict['port'],
+ match_dict['port'],
+ 'incremental'])
+ self.data[match_dict['port']] = 0
+ self.data[match_dict['port']] += 1
+ # requests per vhost
+ if match_dict.get('vhost'):
+ dim_id = match_dict['vhost'].replace('.', '_')
+ if dim_id not in self.data:
+ self.charts['vhost'].add_dimension([dim_id,
+ match_dict['vhost'],
+ 'incremental'])
+ self.data[dim_id] = 0
+ self.data[dim_id] += 1
+
+ def get_data_per_response_codes_detailed(self, code):
+ """
+        :param code: str: CODE from the parsed line. Ex.: '202', '499'
+        :return:
+        Calls add_dimension() when the code is seen for the first time
+ """
+ if code not in self.data:
+ if self.configuration.get('detailed_response_aggregate', True):
+ self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
+ self.data[code] = 0
+ else:
+ code_index = int(code[0]) if int(code[0]) < 6 else 6
+ chart_key = 'detailed_response_codes' + DET_RESP_AGGR[code_index]
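+            # e.g. code '499' -> code_index 4 -> the per-class 4xx chart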
+ self.charts[chart_key].add_dimension([code, code, 'incremental'])
+ self.data[code] = 0
+ self.data[code] += 1
+
+ def get_data_per_pattern(self, row, other, pattern):
+ """
+        :param row: str: field value to classify
+        :param other: str: fallback dimension key for unmatched rows
+        :param pattern: list of named tuples: (description, func)
+        :return:
+        Scan through the string, looking for the first user-defined
+        pattern that produces a match
+ """
+ match = None
+ for elem in pattern:
+ if elem.func(row):
+ self.data[elem.description] += 1
+ match = True
+ break
+ if not match:
+ self.data[other] += 1
+
+ def get_data_per_statuses(self, code):
+ """
+ :param code: str: response status code. Ex.: '202', '499'
+ :return:
+ """
+ code_class = code[0]
+ if code_class == '2' or code == '304' or code_class == '1':
+ self.data['successful_requests'] += 1
+ elif code_class == '3':
+ self.data['redirects'] += 1
+ elif code_class == '4':
+ self.data['bad_requests'] += 1
+ elif code_class == '5':
+ self.data['server_errors'] += 1
+ else:
+ self.data['other_requests'] += 1
+
+
+class ApacheCache:
+ def __init__(self, service):
+ self.service = service
+ self.order = ORDER_APACHE_CACHE
+ self.definitions = CHARTS_APACHE_CACHE
+
+ @staticmethod
+ def check():
+ return True
+
+ @staticmethod
+ def get_data(raw_data=None):
+ data = dict(hit=0, miss=0, other=0)
+ if not raw_data:
+ return None if raw_data is None else data
+
+ for line in raw_data:
+ if 'cache hit' in line:
+ data['hit'] += 1
+ elif 'cache miss' in line:
+ data['miss'] += 1
+ else:
+ data['other'] += 1
+ return data
+
+
+class Squid:
+ def __init__(self, service):
+ self.service = service
+ self.order = ORDER_SQUID
+ self.definitions = CHARTS_SQUID
+ self.pre_filter = check_patterns('filter', self.configuration.get('filter'))
+ self.storage = dict()
+ self.data = {
+ 'duration_max': 0,
+ 'duration_avg': 0,
+ 'duration_min': 0,
+ 'bytes': 0,
+ '0xx': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'unmatched': 0,
+ 'unique_ipv4': 0,
+ 'unique_ipv6': 0,
+ 'unique_tot_ipv4': 0,
+ 'unique_tot_ipv6': 0,
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0
+ }
+
+ def __getattr__(self, item):
+ return getattr(self.service, item)
+
+ def check(self):
+ last_line = read_last_line(self.log_path)
+ if not last_line:
+ return False
+ self.storage['unique_all_time'] = list()
+ self.storage['regex'] = re.compile(r'[0-9.]+\s+(?P<duration>[0-9]+)'
+ r' (?P<client_address>[\da-f.:]+)'
+ r' (?P<squid_code>[A-Z_]+)/'
+ r'(?P<http_code>[0-9]+)'
+ r' (?P<bytes>[0-9]+)'
+ r' (?P<method>[A-Z_]+)'
+ r' (?P<url>[^ ]+)'
+ r' (?P<user>[^ ]+)'
+ r' (?P<hier_code>[A-Z_]+)/[\da-z.:-]+'
+ r' (?P<mime_type>[A-Za-z-]*)')
+
+ match = self.storage['regex'].search(last_line)
+ if not match:
+            self.error('Regex does not match (%s)' % self.storage['regex'].pattern)
+ return False
+ self.storage['dynamic'] = {
+ 'http_code': {
+ 'chart': 'squid_detailed_response_codes',
+ 'func_dim_id': None,
+ 'func_dim': None
+ },
+ 'hier_code': {
+ 'chart': 'squid_hier_code',
+ 'func_dim_id': None,
+ 'func_dim': lambda v: v.replace('HIER_', '')
+ },
+ 'method': {
+ 'chart': 'squid_method',
+ 'func_dim_id': None,
+ 'func_dim': None
+ },
+ 'mime_type': {
+ 'chart': 'squid_mime_type',
+ 'func_dim_id': lambda v: str.lower(v) if str.lower(v) in MIME_TYPES else 'unknown',
+ 'func_dim': None
+ }
+ }
+ if not self.configuration.get('all_time', True):
+ self.order.remove('squid_clients_all')
+ return True
+
+ def get_data(self, raw_data=None):
+ if not raw_data:
+ return None if raw_data is None else self.data
+
+ filtered_data = filter_data(raw_data=raw_data, pre_filter=self.pre_filter)
+
+ unique_ip = set()
+ timings = defaultdict(lambda: dict(minimum=None, maximum=0, summary=0, count=0))
+
+ for row in filtered_data:
+ match = self.storage['regex'].search(row)
+ if match:
+ match = match.groupdict()
+ if match['duration'] != '0':
+ get_timings(timings=timings['duration'], time=float(match['duration']) * 1000)
+ try:
+ self.data[match['http_code'][0] + 'xx'] += 1
+ except KeyError:
+ self.data['other'] += 1
+
+ self.get_data_per_statuses(match['http_code'])
+
+ self.get_data_per_squid_code(match['squid_code'])
+
+ self.data['bytes'] += int(match['bytes'])
+
+ proto = 'ipv4' if '.' in match['client_address'] else 'ipv6'
+ # unique clients ips
+ if self.configuration.get('all_time', True):
+ if address_not_in_pool(pool=self.storage['unique_all_time'],
+ address=match['client_address'],
+ pool_size=self.data['unique_tot_ipv4'] + self.data['unique_tot_ipv6']):
+ self.data['unique_tot_' + proto] += 1
+
+ if match['client_address'] not in unique_ip:
+ self.data['unique_' + proto] += 1
+ unique_ip.add(match['client_address'])
+
+ for key, values in self.storage['dynamic'].items():
+ if match[key] == '-':
+ continue
+ dimension_id = values['func_dim_id'](match[key]) if values['func_dim_id'] else match[key]
+ if dimension_id not in self.data:
+ dimension = values['func_dim'](match[key]) if values['func_dim'] else dimension_id
+ self.charts[values['chart']].add_dimension([dimension_id,
+ dimension,
+ 'incremental'])
+ self.data[dimension_id] = 0
+ self.data[dimension_id] += 1
+ else:
+ self.data['unmatched'] += 1
+
+ for elem in timings:
+ self.data[elem + '_min'] += timings[elem]['minimum']
+ self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
+ self.data[elem + '_max'] += timings[elem]['maximum']
+ return self.data
+
+ def get_data_per_statuses(self, code):
+ """
+ :param code: str: response status code. Ex.: '202', '499'
+ :return:
+ """
+ code_class = code[0]
+ if code_class == '2' or code == '304' or code_class == '1' or code == '000':
+ self.data['successful_requests'] += 1
+ elif code_class == '3':
+ self.data['redirects'] += 1
+ elif code_class == '4':
+ self.data['bad_requests'] += 1
+ elif code_class == '5' or code_class == '6':
+ self.data['server_errors'] += 1
+ else:
+ self.data['other_requests'] += 1
+
+ def get_data_per_squid_code(self, code):
+ """
+ :param code: str: squid response code. Ex.: 'TCP_MISS', 'TCP_MISS_ABORTED'
+ :return:
+ """
+ if code not in self.data:
+ self.charts['squid_code'].add_dimension([code, code, 'incremental'])
+ self.data[code] = 0
+ self.data[code] += 1
+
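+        # e.g. 'TCP_MISS_ABORTED' splits into 'TCP', 'MISS', 'ABORTED';
+        # each tag found in SQUID_CODES gets a dimension on its detailed chart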
+ for tag in code.split('_'):
+ try:
+ chart_key = SQUID_CODES[tag]
+ except KeyError:
+ continue
+ dimension_id = '_'.join(['code_detailed', tag])
+ if dimension_id not in self.data:
+ self.charts[chart_key].add_dimension([dimension_id, tag, 'incremental'])
+ self.data[dimension_id] = 0
+ self.data[dimension_id] += 1
+
+
+def get_timings(timings, time):
+ """
+    :param timings: dict: running 'minimum', 'maximum', 'summary' and 'count' values
+    :param time: float: observed time value
+    :return: None
+ """
+ if timings['minimum'] is None:
+ timings['minimum'] = time
+ if time > timings['maximum']:
+ timings['maximum'] = time
+ elif time < timings['minimum']:
+ timings['minimum'] = time
+ timings['summary'] += time
+ timings['count'] += 1
+
+
+def get_hist(index, buckets, time):
+ """
+    :param index: list: bucket upper bounds (Ex. [10, 50, 100, 150, ..., maxint])
+    :param buckets: list: bucket counters, same length as index
+    :param time: float: observed response time, in the same unit as index
+    :return: None
+ """
+    for i in range(len(index) - 1, -1, -1):
+ if time <= index[i]:
+ buckets[i] += 1
+ else:
+ break
+
+
+def address_not_in_pool(pool, address, pool_size):
+ """
+ :param pool: list of ip addresses
+ :param address: ip address
+ :param pool_size: current pool size
+ :return: True if address not in pool. False otherwise.
+ """
+    index = bisect.bisect_left(pool, address)
+    if index < pool_size and pool[index] == address:
+        return False
+    bisect.insort_left(pool, address)
+    return True
+
+
+def find_regex_return(match_dict=None, msg='Generic error message'):
+ """
+ :param match_dict: dict: re.search.groupdict() or None
+ :param msg: str: error description
+ :return: tuple:
+ """
+ return match_dict, msg
+
+
+def check_patterns(string, dimension_regex_dict):
+ """
+    :param string: str: prefix for the dimension descriptions (e.g. 'filter', 'url_pattern')
+ :param dimension_regex_dict: dict: ex. {'dim1': '<pattern1>', 'dim2': '<pattern2>'}
+ :return: list of named tuples or None:
+ We need to make sure all patterns are valid regular expressions
+ """
+ if not hasattr(dimension_regex_dict, 'keys'):
+ return None
+
+ result = list()
+
+ def valid_pattern(pattern):
+ """
+ :param pattern: str
+        :return: re.compile(pattern) or False
+ """
+ if not isinstance(pattern, str):
+ return False
+ try:
+ return re.compile(pattern)
+ except re.error:
+ return False
+
+ def func_search(pattern):
+ def closure(v):
+ return pattern.search(v)
+
+ return closure
+
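+    # e.g. check_patterns('filter', {'include': 'api.*'}) yields one named
+    # tuple with description 'filter_include' whose func runs
+    # re.compile('api.*').search on each row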
+ for dimension, regex in dimension_regex_dict.items():
+ valid = valid_pattern(regex)
+        if isinstance(dimension, str) and valid:
+ func = func_search(valid)
+ result.append(NAMED_PATTERN(description='_'.join([string, dimension]),
+ func=func))
+ return result or None
+
+
+def filter_data(raw_data, pre_filter):
+ """
+    :param raw_data: list: log lines
+    :param pre_filter: list of named tuples (description, func) or None
+    :return: iterable of rows that passed the include/exclude filters
+ """
+
+ if not pre_filter:
+ return raw_data
+ filtered = raw_data
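+    # filter()/filterfalse() are lazy on Python 3, so the chained filters
+    # only run when the caller iterates over the returned object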
+ for elem in pre_filter:
+ if elem.description == 'filter_include':
+ filtered = filter(elem.func, filtered)
+ elif elem.description == 'filter_exclude':
+ filtered = filterfalse(elem.func, filtered)
+ return filtered
diff --git a/collectors/python.d.plugin/web_log/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf
new file mode 100644
index 0000000..0ac17f6
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/web_log.conf
@@ -0,0 +1,204 @@
+# netdata python.d.plugin configuration for web log
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+
+# ----------------------------------------------------------------------
+# PLUGIN CONFIGURATION
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, web_log also supports the following:
+#
+# path: 'PATH' # the path to web server log file
+# path: 'PATH[0-9]*[0-9]' # log files with date suffix are also supported
+# detailed_response_codes: yes/no # default: yes. Additional chart where response codes are not grouped
+#  detailed_response_aggregate: yes/no # default: yes. If 'no', detailed response codes are split into one chart per class (1xx, 2xx, ...)
+#  all_time: yes/no # default: yes. All-time unique client IPs chart (50000 addresses ~ 400KB)
+# filter: # filter with regex
+#     include: 'REGEX' # only rows that match the regex
+#     exclude: 'REGEX' # all rows except those that match the regex
+# categories: # requests per url chart configuration
+# cacti: 'cacti.*' # name(dimension): REGEX to match
+# observium: 'observium.*' # name(dimension): REGEX to match
+# stub_status: 'stub_status' # name(dimension): REGEX to match
+# user_defined: # requests per pattern in <user_defined> field (custom_log_format)
+# cacti: 'cacti.*' # name(dimension): REGEX to match
+# observium: 'observium.*' # name(dimension): REGEX to match
+# stub_status: 'stub_status' # name(dimension): REGEX to match
+# custom_log_format: # define a custom log format
+# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+\.\d+) '
+# time_multiplier: 1000000 # type <int>/<float> - convert time to microseconds
+#  histogram: [1,3,10,30,100, ...] # type list of int - Cumulative histogram of response time in milliseconds
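+#
+# For example, a hypothetical job combining several of these options
+# (the path, regexes and bucket bounds are illustrative only):
+#
+# nginx_detailed:
+#   name: 'nginx_detailed'
+#   path: '/var/log/nginx/access.log'
+#   all_time: no
+#   filter:
+#     exclude: '.*\.(gif|png|css|js)$'
+#   categories:
+#     api: '^/api/.*'
+#     static: '^/static/.*'
+#   histogram: [1, 3, 10, 30, 100, 300, 1000]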
+
+# ----------------------------------------------------------------------
+# WEB SERVER CONFIGURATION
+#
+# Make sure the web server log directory and the web server log files
+# can be read by user 'netdata'.
+#
+# To enable the timings chart and the requests size dimension, the
+# web server needs to log them. This is how to add them:
+#
+# nginx:
+# log_format netdata '$remote_addr - $remote_user [$time_local] '
+# '"$request" $status $body_bytes_sent '
+# '$request_length $request_time $upstream_response_time '
+# '"$http_referer" "$http_user_agent"';
+# access_log /var/log/nginx/access.log netdata;
+#
+# apache (you need mod_logio enabled):
+# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
+# LogFormat "%h %l %u %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" netdata
+# CustomLog "/var/log/apache2/access.log" netdata
+
+# ----------------------------------------------------------------------
+# VHOST AND PORT
+# if you want to graph requests/sec per virtual host and per port (e.g. to compare the number of http vs https requests)
+
+# in apache : (%v gives the hostname, %p the port number)
+# LogFormat "%v %p %h %t \"%r\" %>s %O %I %D \"%{Referer}i\" \"%{User-Agent}i\"" vhost_netdata
+#
+# and in this file, in the apache_vhosts_log section, add:
+# custom_log_format:
+# pattern: '(?P<vhost>[a-zA-Z\d.-_]+) (?P<port>\d+) (?P<address>[\da-f.:]+) \[.*\] "(?P<method>[A-Z]+)[^"]*" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d+)'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them per web server will run (when they have the same name)
+
+
+# -------------------------------------------
+# nginx log on various distros
+
+# debian, arch
+nginx_log:
+ name: 'nginx'
+ path: '/var/log/nginx/access.log'
+
+# gentoo
+nginx_log2:
+ name: 'nginx'
+ path: '/var/log/nginx/localhost.access_log'
+
+
+# -------------------------------------------
+# apache log on various distros
+
+# debian
+apache_log:
+ name: 'apache'
+ path: '/var/log/apache2/access.log'
+
+# gentoo
+apache_log2:
+ name: 'apache'
+ path: '/var/log/apache2/access_log'
+
+# arch
+apache_log3:
+ name: 'apache'
+ path: '/var/log/httpd/access_log'
+
+# debian
+apache_vhosts_log:
+ name: 'apache_vhosts'
+ path: '/var/log/apache2/other_vhosts_access.log'
+
+
+# -------------------------------------------
+# gunicorn log on various distros
+
+gunicorn_log:
+ name: 'gunicorn'
+ path: '/var/log/gunicorn/access.log'
+
+gunicorn_log2:
+ name: 'gunicorn'
+ path: '/var/log/gunicorn/gunicorn-access.log'
+
+# -------------------------------------------
+# Apache Cache
+apache_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache/cache.log'
+
+apache2_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache2/cache.log'
+
+httpd_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/httpd/cache.log'
+
+# -------------------------------------------
+# Squid
+
+# debian/ubuntu
+squid_log1:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid3/access.log'
+
+# gentoo
+squid_log2:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid/access.log'