Diffstat
-rw-r--r--collectors/python.d.plugin/Makefile.am229
-rw-r--r--collectors/python.d.plugin/README.md77
-rw-r--r--collectors/python.d.plugin/adaptec_raid/Makefile.inc13
l---------collectors/python.d.plugin/adaptec_raid/README.md1
-rw-r--r--collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py247
-rw-r--r--collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf53
-rw-r--r--collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md204
-rw-r--r--collectors/python.d.plugin/adaptec_raid/metadata.yaml167
-rw-r--r--collectors/python.d.plugin/alarms/Makefile.inc13
l---------collectors/python.d.plugin/alarms/README.md1
-rw-r--r--collectors/python.d.plugin/alarms/alarms.chart.py95
-rw-r--r--collectors/python.d.plugin/alarms/alarms.conf60
-rw-r--r--collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md201
-rw-r--r--collectors/python.d.plugin/alarms/metadata.yaml177
-rw-r--r--collectors/python.d.plugin/am2320/Makefile.inc8
l---------collectors/python.d.plugin/am2320/README.md1
-rw-r--r--collectors/python.d.plugin/am2320/am2320.chart.py68
-rw-r--r--collectors/python.d.plugin/am2320/am2320.conf68
-rw-r--r--collectors/python.d.plugin/am2320/integrations/am2320.md181
-rw-r--r--collectors/python.d.plugin/am2320/metadata.yaml135
-rw-r--r--collectors/python.d.plugin/anomalies/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/anomalies/README.md248
-rw-r--r--collectors/python.d.plugin/anomalies/anomalies.chart.py425
-rw-r--r--collectors/python.d.plugin/anomalies/anomalies.conf184
-rw-r--r--collectors/python.d.plugin/anomalies/metadata.yaml87
-rw-r--r--collectors/python.d.plugin/beanstalk/Makefile.inc13
l---------collectors/python.d.plugin/beanstalk/README.md1
-rw-r--r--collectors/python.d.plugin/beanstalk/beanstalk.chart.py252
-rw-r--r--collectors/python.d.plugin/beanstalk/beanstalk.conf78
-rw-r--r--collectors/python.d.plugin/beanstalk/integrations/beanstalk.md219
-rw-r--r--collectors/python.d.plugin/beanstalk/metadata.yaml263
-rw-r--r--collectors/python.d.plugin/bind_rndc/Makefile.inc13
l---------collectors/python.d.plugin/bind_rndc/README.md1
-rw-r--r--collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py252
-rw-r--r--collectors/python.d.plugin/bind_rndc/bind_rndc.conf108
-rw-r--r--collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md215
-rw-r--r--collectors/python.d.plugin/bind_rndc/metadata.yaml191
-rw-r--r--collectors/python.d.plugin/boinc/Makefile.inc13
l---------collectors/python.d.plugin/boinc/README.md1
-rw-r--r--collectors/python.d.plugin/boinc/boinc.chart.py168
-rw-r--r--collectors/python.d.plugin/boinc/boinc.conf66
-rw-r--r--collectors/python.d.plugin/boinc/integrations/boinc.md204
-rw-r--r--collectors/python.d.plugin/boinc/metadata.yaml198
-rw-r--r--collectors/python.d.plugin/ceph/Makefile.inc13
l---------collectors/python.d.plugin/ceph/README.md1
-rw-r--r--collectors/python.d.plugin/ceph/ceph.chart.py374
-rw-r--r--collectors/python.d.plugin/ceph/ceph.conf75
-rw-r--r--collectors/python.d.plugin/ceph/integrations/ceph.md194
-rw-r--r--collectors/python.d.plugin/ceph/metadata.yaml223
-rw-r--r--collectors/python.d.plugin/changefinder/Makefile.inc13
l---------collectors/python.d.plugin/changefinder/README.md1
-rw-r--r--collectors/python.d.plugin/changefinder/changefinder.chart.py185
-rw-r--r--collectors/python.d.plugin/changefinder/changefinder.conf74
-rw-r--r--collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md217
-rw-r--r--collectors/python.d.plugin/changefinder/metadata.yaml212
-rw-r--r--collectors/python.d.plugin/dovecot/Makefile.inc13
l---------collectors/python.d.plugin/dovecot/README.md1
-rw-r--r--collectors/python.d.plugin/dovecot/dovecot.chart.py143
-rw-r--r--collectors/python.d.plugin/dovecot/dovecot.conf98
-rw-r--r--collectors/python.d.plugin/dovecot/integrations/dovecot.md197
-rw-r--r--collectors/python.d.plugin/dovecot/metadata.yaml207
-rw-r--r--collectors/python.d.plugin/example/Makefile.inc13
l---------collectors/python.d.plugin/example/README.md1
-rw-r--r--collectors/python.d.plugin/example/example.chart.py51
-rw-r--r--collectors/python.d.plugin/example/example.conf87
-rw-r--r--collectors/python.d.plugin/example/integrations/example_collector.md171
-rw-r--r--collectors/python.d.plugin/example/metadata.yaml138
-rw-r--r--collectors/python.d.plugin/exim/Makefile.inc13
l---------collectors/python.d.plugin/exim/README.md1
-rw-r--r--collectors/python.d.plugin/exim/exim.chart.py39
-rw-r--r--collectors/python.d.plugin/exim/exim.conf91
-rw-r--r--collectors/python.d.plugin/exim/integrations/exim.md181
-rw-r--r--collectors/python.d.plugin/exim/metadata.yaml132
-rw-r--r--collectors/python.d.plugin/fail2ban/Makefile.inc13
l---------collectors/python.d.plugin/fail2ban/README.md1
-rw-r--r--collectors/python.d.plugin/fail2ban/fail2ban.chart.py217
-rw-r--r--collectors/python.d.plugin/fail2ban/fail2ban.conf68
-rw-r--r--collectors/python.d.plugin/fail2ban/integrations/fail2ban.md209
-rw-r--r--collectors/python.d.plugin/fail2ban/metadata.yaml200
-rw-r--r--collectors/python.d.plugin/gearman/Makefile.inc13
l---------collectors/python.d.plugin/gearman/README.md1
-rw-r--r--collectors/python.d.plugin/gearman/gearman.chart.py243
-rw-r--r--collectors/python.d.plugin/gearman/gearman.conf75
-rw-r--r--collectors/python.d.plugin/gearman/integrations/gearman.md210
-rw-r--r--collectors/python.d.plugin/gearman/metadata.yaml168
-rw-r--r--collectors/python.d.plugin/go_expvar/Makefile.inc13
l---------collectors/python.d.plugin/go_expvar/README.md1
-rw-r--r--collectors/python.d.plugin/go_expvar/go_expvar.chart.py253
-rw-r--r--collectors/python.d.plugin/go_expvar/go_expvar.conf108
-rw-r--r--collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md335
-rw-r--r--collectors/python.d.plugin/go_expvar/metadata.yaml329
-rw-r--r--collectors/python.d.plugin/haproxy/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/haproxy/README.md90
-rw-r--r--collectors/python.d.plugin/haproxy/haproxy.chart.py368
-rw-r--r--collectors/python.d.plugin/haproxy/haproxy.conf83
-rw-r--r--collectors/python.d.plugin/haproxy/metadata.yaml322
-rw-r--r--collectors/python.d.plugin/hddtemp/Makefile.inc13
l---------collectors/python.d.plugin/hddtemp/README.md1
-rw-r--r--collectors/python.d.plugin/hddtemp/hddtemp.chart.py99
-rw-r--r--collectors/python.d.plugin/hddtemp/hddtemp.conf95
-rw-r--r--collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md217
-rw-r--r--collectors/python.d.plugin/hddtemp/metadata.yaml163
-rw-r--r--collectors/python.d.plugin/hpssa/Makefile.inc13
l---------collectors/python.d.plugin/hpssa/README.md1
-rw-r--r--collectors/python.d.plugin/hpssa/hpssa.chart.py396
-rw-r--r--collectors/python.d.plugin/hpssa/hpssa.conf61
-rw-r--r--collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md205
-rw-r--r--collectors/python.d.plugin/hpssa/metadata.yaml185
-rw-r--r--collectors/python.d.plugin/icecast/Makefile.inc13
l---------collectors/python.d.plugin/icecast/README.md1
-rw-r--r--collectors/python.d.plugin/icecast/icecast.chart.py94
-rw-r--r--collectors/python.d.plugin/icecast/icecast.conf81
-rw-r--r--collectors/python.d.plugin/icecast/integrations/icecast.md166
-rw-r--r--collectors/python.d.plugin/icecast/metadata.yaml127
-rw-r--r--collectors/python.d.plugin/ipfs/Makefile.inc13
l---------collectors/python.d.plugin/ipfs/README.md1
-rw-r--r--collectors/python.d.plugin/ipfs/integrations/ipfs.md203
-rw-r--r--collectors/python.d.plugin/ipfs/ipfs.chart.py149
-rw-r--r--collectors/python.d.plugin/ipfs/ipfs.conf82
-rw-r--r--collectors/python.d.plugin/ipfs/metadata.yaml172
-rw-r--r--collectors/python.d.plugin/litespeed/Makefile.inc13
l---------collectors/python.d.plugin/litespeed/README.md1
-rw-r--r--collectors/python.d.plugin/litespeed/integrations/litespeed.md170
-rw-r--r--collectors/python.d.plugin/litespeed/litespeed.chart.py188
-rw-r--r--collectors/python.d.plugin/litespeed/litespeed.conf72
-rw-r--r--collectors/python.d.plugin/litespeed/metadata.yaml168
-rw-r--r--collectors/python.d.plugin/megacli/Makefile.inc13
l---------collectors/python.d.plugin/megacli/README.md1
-rw-r--r--collectors/python.d.plugin/megacli/integrations/megacli.md220
-rw-r--r--collectors/python.d.plugin/megacli/megacli.chart.py278
-rw-r--r--collectors/python.d.plugin/megacli/megacli.conf60
-rw-r--r--collectors/python.d.plugin/megacli/metadata.yaml193
-rw-r--r--collectors/python.d.plugin/memcached/Makefile.inc13
l---------collectors/python.d.plugin/memcached/README.md1
-rw-r--r--collectors/python.d.plugin/memcached/integrations/memcached.md215
-rw-r--r--collectors/python.d.plugin/memcached/memcached.chart.py197
-rw-r--r--collectors/python.d.plugin/memcached/memcached.conf90
-rw-r--r--collectors/python.d.plugin/memcached/metadata.yaml247
-rw-r--r--collectors/python.d.plugin/monit/Makefile.inc13
l---------collectors/python.d.plugin/monit/README.md1
-rw-r--r--collectors/python.d.plugin/monit/integrations/monit.md214
-rw-r--r--collectors/python.d.plugin/monit/metadata.yaml217
-rw-r--r--collectors/python.d.plugin/monit/monit.chart.py360
-rw-r--r--collectors/python.d.plugin/monit/monit.conf86
-rw-r--r--collectors/python.d.plugin/nsd/Makefile.inc13
l---------collectors/python.d.plugin/nsd/README.md1
-rw-r--r--collectors/python.d.plugin/nsd/integrations/name_server_daemon.md199
-rw-r--r--collectors/python.d.plugin/nsd/metadata.yaml201
-rw-r--r--collectors/python.d.plugin/nsd/nsd.chart.py105
-rw-r--r--collectors/python.d.plugin/nsd/nsd.conf91
-rw-r--r--collectors/python.d.plugin/nvidia_smi/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/nvidia_smi/README.md157
-rw-r--r--collectors/python.d.plugin/nvidia_smi/metadata.yaml166
-rw-r--r--collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py651
-rw-r--r--collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf68
-rw-r--r--collectors/python.d.plugin/openldap/Makefile.inc13
l---------collectors/python.d.plugin/openldap/README.md1
-rw-r--r--collectors/python.d.plugin/openldap/integrations/openldap.md215
-rw-r--r--collectors/python.d.plugin/openldap/metadata.yaml225
-rw-r--r--collectors/python.d.plugin/openldap/openldap.chart.py216
-rw-r--r--collectors/python.d.plugin/openldap/openldap.conf75
-rw-r--r--collectors/python.d.plugin/oracledb/Makefile.inc13
l---------collectors/python.d.plugin/oracledb/README.md1
-rw-r--r--collectors/python.d.plugin/oracledb/integrations/oracle_db.md226
-rw-r--r--collectors/python.d.plugin/oracledb/metadata.yaml309
-rw-r--r--collectors/python.d.plugin/oracledb/oracledb.chart.py846
-rw-r--r--collectors/python.d.plugin/oracledb/oracledb.conf88
-rw-r--r--collectors/python.d.plugin/pandas/Makefile.inc13
l---------collectors/python.d.plugin/pandas/README.md1
-rw-r--r--collectors/python.d.plugin/pandas/integrations/pandas.md365
-rw-r--r--collectors/python.d.plugin/pandas/metadata.yaml308
-rw-r--r--collectors/python.d.plugin/pandas/pandas.chart.py99
-rw-r--r--collectors/python.d.plugin/pandas/pandas.conf211
-rw-r--r--collectors/python.d.plugin/postfix/Makefile.inc13
l---------collectors/python.d.plugin/postfix/README.md1
-rw-r--r--collectors/python.d.plugin/postfix/integrations/postfix.md151
-rw-r--r--collectors/python.d.plugin/postfix/metadata.yaml124
-rw-r--r--collectors/python.d.plugin/postfix/postfix.chart.py52
-rw-r--r--collectors/python.d.plugin/postfix/postfix.conf72
-rw-r--r--collectors/python.d.plugin/puppet/Makefile.inc13
l---------collectors/python.d.plugin/puppet/README.md1
-rw-r--r--collectors/python.d.plugin/puppet/integrations/puppet.md215
-rw-r--r--collectors/python.d.plugin/puppet/metadata.yaml185
-rw-r--r--collectors/python.d.plugin/puppet/puppet.chart.py121
-rw-r--r--collectors/python.d.plugin/puppet/puppet.conf94
-rw-r--r--collectors/python.d.plugin/python.d.conf78
-rw-r--r--collectors/python.d.plugin/python.d.plugin.in946
-rw-r--r--collectors/python.d.plugin/python_modules/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py91
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py82
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py163
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py261
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py336
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py188
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/charts.py431
-rw-r--r--collectors/python.d.plugin/python_modules/bases/collection.py117
-rw-r--r--collectors/python.d.plugin/python_modules/bases/loaders.py46
-rw-r--r--collectors/python.d.plugin/python_modules/bases/loggers.py198
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/__init__.py316
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/composer.py140
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/constructor.py676
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py86
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/dumper.py63
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/emitter.py1141
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/error.py76
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/events.py87
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/loader.py41
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/nodes.py50
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/parser.py590
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/reader.py191
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/representer.py485
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/resolver.py225
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/scanner.py1458
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/serializer.py112
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/tokens.py105
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/__init__.py313
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/composer.py140
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/constructor.py687
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py86
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/dumper.py63
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/emitter.py1138
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/error.py76
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/events.py87
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/loader.py41
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/nodes.py50
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/parser.py590
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/reader.py193
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/representer.py375
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/resolver.py225
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/scanner.py1449
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/serializer.py112
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/tokens.py105
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/boinc_client.py515
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/filelock.py451
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/lm_sensors.py327
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/mcrcon.py74
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/monotonic.py201
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/ordereddict.py110
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/__init__.py98
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/_collections.py320
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/connection.py374
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/connectionpool.py900
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py591
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py344
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py297
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py113
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py458
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py808
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py189
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/exceptions.py247
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/fields.py179
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/filepost.py95
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py5
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py54
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py260
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/six.py852
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py20
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py156
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/poolmanager.py441
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/request.py149
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/response.py623
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/__init__.py55
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/connection.py131
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/request.py119
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/response.py82
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/retry.py402
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/selectors.py588
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py338
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/timeout.py243
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/url.py231
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/wait.py41
-rw-r--r--collectors/python.d.plugin/rethinkdbs/Makefile.inc13
l---------collectors/python.d.plugin/rethinkdbs/README.md1
-rw-r--r--collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md190
-rw-r--r--collectors/python.d.plugin/rethinkdbs/metadata.yaml188
-rw-r--r--collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py247
-rw-r--r--collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf76
-rw-r--r--collectors/python.d.plugin/retroshare/Makefile.inc13
l---------collectors/python.d.plugin/retroshare/README.md1
-rw-r--r--collectors/python.d.plugin/retroshare/integrations/retroshare.md191
-rw-r--r--collectors/python.d.plugin/retroshare/metadata.yaml144
-rw-r--r--collectors/python.d.plugin/retroshare/retroshare.chart.py78
-rw-r--r--collectors/python.d.plugin/retroshare/retroshare.conf72
-rw-r--r--collectors/python.d.plugin/riakkv/Makefile.inc13
l---------collectors/python.d.plugin/riakkv/README.md1
-rw-r--r--collectors/python.d.plugin/riakkv/integrations/riakkv.md220
-rw-r--r--collectors/python.d.plugin/riakkv/metadata.yaml358
-rw-r--r--collectors/python.d.plugin/riakkv/riakkv.chart.py334
-rw-r--r--collectors/python.d.plugin/riakkv/riakkv.conf68
-rw-r--r--collectors/python.d.plugin/samba/Makefile.inc13
l---------collectors/python.d.plugin/samba/README.md1
-rw-r--r--collectors/python.d.plugin/samba/integrations/samba.md221
-rw-r--r--collectors/python.d.plugin/samba/metadata.yaml205
-rw-r--r--collectors/python.d.plugin/samba/samba.chart.py144
-rw-r--r--collectors/python.d.plugin/samba/samba.conf60
-rw-r--r--collectors/python.d.plugin/sensors/Makefile.inc13
l---------collectors/python.d.plugin/sensors/README.md1
-rw-r--r--collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md187
-rw-r--r--collectors/python.d.plugin/sensors/metadata.yaml184
-rw-r--r--collectors/python.d.plugin/sensors/sensors.chart.py179
-rw-r--r--collectors/python.d.plugin/sensors/sensors.conf61
-rw-r--r--collectors/python.d.plugin/smartd_log/Makefile.inc13
l---------collectors/python.d.plugin/smartd_log/README.md1
-rw-r--r--collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md223
-rw-r--r--collectors/python.d.plugin/smartd_log/metadata.yaml429
-rw-r--r--collectors/python.d.plugin/smartd_log/smartd_log.chart.py790
-rw-r--r--collectors/python.d.plugin/smartd_log/smartd_log.conf76
-rw-r--r--collectors/python.d.plugin/spigotmc/Makefile.inc13
l---------collectors/python.d.plugin/spigotmc/README.md1
-rw-r--r--collectors/python.d.plugin/spigotmc/integrations/spigotmc.md216
-rw-r--r--collectors/python.d.plugin/spigotmc/metadata.yaml176
-rw-r--r--collectors/python.d.plugin/spigotmc/spigotmc.chart.py184
-rw-r--r--collectors/python.d.plugin/spigotmc/spigotmc.conf66
-rw-r--r--collectors/python.d.plugin/squid/Makefile.inc13
l---------collectors/python.d.plugin/squid/README.md1
-rw-r--r--collectors/python.d.plugin/squid/integrations/squid.md199
-rw-r--r--collectors/python.d.plugin/squid/metadata.yaml174
-rw-r--r--collectors/python.d.plugin/squid/squid.chart.py123
-rw-r--r--collectors/python.d.plugin/squid/squid.conf167
-rw-r--r--collectors/python.d.plugin/tomcat/Makefile.inc13
l---------collectors/python.d.plugin/tomcat/README.md1
-rw-r--r--collectors/python.d.plugin/tomcat/integrations/tomcat.md203
-rw-r--r--collectors/python.d.plugin/tomcat/metadata.yaml200
-rw-r--r--collectors/python.d.plugin/tomcat/tomcat.chart.py199
-rw-r--r--collectors/python.d.plugin/tomcat/tomcat.conf89
-rw-r--r--collectors/python.d.plugin/tor/Makefile.inc13
l---------collectors/python.d.plugin/tor/README.md1
-rw-r--r--collectors/python.d.plugin/tor/integrations/tor.md197
-rw-r--r--collectors/python.d.plugin/tor/metadata.yaml143
-rw-r--r--collectors/python.d.plugin/tor/tor.chart.py109
-rw-r--r--collectors/python.d.plugin/tor/tor.conf81
-rw-r--r--collectors/python.d.plugin/traefik/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/traefik/README.md98
-rw-r--r--collectors/python.d.plugin/traefik/metadata.yaml125
-rw-r--r--collectors/python.d.plugin/traefik/traefik.chart.py198
-rw-r--r--collectors/python.d.plugin/traefik/traefik.conf77
-rw-r--r--collectors/python.d.plugin/uwsgi/Makefile.inc13
l---------collectors/python.d.plugin/uwsgi/README.md1
-rw-r--r--collectors/python.d.plugin/uwsgi/integrations/uwsgi.md219
-rw-r--r--collectors/python.d.plugin/uwsgi/metadata.yaml201
-rw-r--r--collectors/python.d.plugin/uwsgi/uwsgi.chart.py177
-rw-r--r--collectors/python.d.plugin/uwsgi/uwsgi.conf92
-rw-r--r--collectors/python.d.plugin/varnish/Makefile.inc13
l---------collectors/python.d.plugin/varnish/README.md1
-rw-r--r--collectors/python.d.plugin/varnish/integrations/varnish.md213
-rw-r--r--collectors/python.d.plugin/varnish/metadata.yaml253
-rw-r--r--collectors/python.d.plugin/varnish/varnish.chart.py385
-rw-r--r--collectors/python.d.plugin/varnish/varnish.conf66
-rw-r--r--collectors/python.d.plugin/w1sensor/Makefile.inc13
l---------collectors/python.d.plugin/w1sensor/README.md1
-rw-r--r--collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md167
-rw-r--r--collectors/python.d.plugin/w1sensor/metadata.yaml119
-rw-r--r--collectors/python.d.plugin/w1sensor/w1sensor.chart.py97
-rw-r--r--collectors/python.d.plugin/w1sensor/w1sensor.conf72
-rw-r--r--collectors/python.d.plugin/zscores/Makefile.inc12
l---------collectors/python.d.plugin/zscores/README.md1
-rw-r--r--collectors/python.d.plugin/zscores/integrations/python.d_zscores.md195
-rw-r--r--collectors/python.d.plugin/zscores/metadata.yaml187
-rw-r--r--collectors/python.d.plugin/zscores/zscores.chart.py146
-rw-r--r--collectors/python.d.plugin/zscores/zscores.conf108
366 files changed, 60062 insertions, 0 deletions
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
new file mode 100644
index 00000000..ca49c1c0
--- /dev/null
+++ b/collectors/python.d.plugin/Makefile.am
@@ -0,0 +1,229 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ python.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ python.d.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_python_SCRIPTS = \
+ $(NULL)
+
+dist_python_DATA = \
+ $(NULL)
+
+userpythonconfigdir=$(configdir)/python.d
+dist_userpythonconfig_DATA = \
+ $(NULL)
+
+# Explicitly install directories to avoid permission issues due to umask
+install-exec-local:
+ $(INSTALL) -d $(DESTDIR)$(userpythonconfigdir)
+
+pythonconfigdir=$(libconfigdir)/python.d
+dist_pythonconfig_DATA = \
+ $(NULL)
+
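+# Each module's Makefile.inc (included below) appends its chart script, config and docs
+# to the dist_* variables defined above.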
+include adaptec_raid/Makefile.inc
+include alarms/Makefile.inc
+include am2320/Makefile.inc
+include anomalies/Makefile.inc
+include beanstalk/Makefile.inc
+include bind_rndc/Makefile.inc
+include boinc/Makefile.inc
+include ceph/Makefile.inc
+include changefinder/Makefile.inc
+include dovecot/Makefile.inc
+include example/Makefile.inc
+include exim/Makefile.inc
+include fail2ban/Makefile.inc
+include gearman/Makefile.inc
+include go_expvar/Makefile.inc
+include haproxy/Makefile.inc
+include hddtemp/Makefile.inc
+include hpssa/Makefile.inc
+include icecast/Makefile.inc
+include ipfs/Makefile.inc
+include litespeed/Makefile.inc
+include megacli/Makefile.inc
+include memcached/Makefile.inc
+include monit/Makefile.inc
+include nvidia_smi/Makefile.inc
+include nsd/Makefile.inc
+include openldap/Makefile.inc
+include oracledb/Makefile.inc
+include pandas/Makefile.inc
+include postfix/Makefile.inc
+include puppet/Makefile.inc
+include rethinkdbs/Makefile.inc
+include retroshare/Makefile.inc
+include riakkv/Makefile.inc
+include samba/Makefile.inc
+include sensors/Makefile.inc
+include smartd_log/Makefile.inc
+include spigotmc/Makefile.inc
+include squid/Makefile.inc
+include tomcat/Makefile.inc
+include tor/Makefile.inc
+include traefik/Makefile.inc
+include uwsgi/Makefile.inc
+include varnish/Makefile.inc
+include w1sensor/Makefile.inc
+include zscores/Makefile.inc
+
+pythonmodulesdir=$(pythondir)/python_modules
+dist_pythonmodules_DATA = \
+ python_modules/__init__.py \
+ $(NULL)
+
+basesdir=$(pythonmodulesdir)/bases
+dist_bases_DATA = \
+ python_modules/bases/__init__.py \
+ python_modules/bases/charts.py \
+ python_modules/bases/collection.py \
+ python_modules/bases/loaders.py \
+ python_modules/bases/loggers.py \
+ $(NULL)
+
+bases_framework_servicesdir=$(basesdir)/FrameworkServices
+dist_bases_framework_services_DATA = \
+ python_modules/bases/FrameworkServices/__init__.py \
+ python_modules/bases/FrameworkServices/ExecutableService.py \
+ python_modules/bases/FrameworkServices/LogService.py \
+ python_modules/bases/FrameworkServices/MySQLService.py \
+ python_modules/bases/FrameworkServices/SimpleService.py \
+ python_modules/bases/FrameworkServices/SocketService.py \
+ python_modules/bases/FrameworkServices/UrlService.py \
+ $(NULL)
+
+third_partydir=$(pythonmodulesdir)/third_party
+dist_third_party_DATA = \
+ python_modules/third_party/__init__.py \
+ python_modules/third_party/ordereddict.py \
+ python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
+ python_modules/third_party/filelock.py \
+ $(NULL)
+
+pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
+dist_pythonyaml2_DATA = \
+ python_modules/pyyaml2/__init__.py \
+ python_modules/pyyaml2/composer.py \
+ python_modules/pyyaml2/constructor.py \
+ python_modules/pyyaml2/cyaml.py \
+ python_modules/pyyaml2/dumper.py \
+ python_modules/pyyaml2/emitter.py \
+ python_modules/pyyaml2/error.py \
+ python_modules/pyyaml2/events.py \
+ python_modules/pyyaml2/loader.py \
+ python_modules/pyyaml2/nodes.py \
+ python_modules/pyyaml2/parser.py \
+ python_modules/pyyaml2/reader.py \
+ python_modules/pyyaml2/representer.py \
+ python_modules/pyyaml2/resolver.py \
+ python_modules/pyyaml2/scanner.py \
+ python_modules/pyyaml2/serializer.py \
+ python_modules/pyyaml2/tokens.py \
+ $(NULL)
+
+pythonyaml3dir=$(pythonmodulesdir)/pyyaml3
+dist_pythonyaml3_DATA = \
+ python_modules/pyyaml3/__init__.py \
+ python_modules/pyyaml3/composer.py \
+ python_modules/pyyaml3/constructor.py \
+ python_modules/pyyaml3/cyaml.py \
+ python_modules/pyyaml3/dumper.py \
+ python_modules/pyyaml3/emitter.py \
+ python_modules/pyyaml3/error.py \
+ python_modules/pyyaml3/events.py \
+ python_modules/pyyaml3/loader.py \
+ python_modules/pyyaml3/nodes.py \
+ python_modules/pyyaml3/parser.py \
+ python_modules/pyyaml3/reader.py \
+ python_modules/pyyaml3/representer.py \
+ python_modules/pyyaml3/resolver.py \
+ python_modules/pyyaml3/scanner.py \
+ python_modules/pyyaml3/serializer.py \
+ python_modules/pyyaml3/tokens.py \
+ $(NULL)
+
+python_urllib3dir=$(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir=$(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir=$(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir=$(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
new file mode 100644
index 00000000..569543d1
--- /dev/null
+++ b/collectors/python.d.plugin/README.md
@@ -0,0 +1,77 @@
+<!--
+title: "python.d.plugin"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/README.md"
+sidebar_label: "python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Developers/External plugins/python.d.plugin"
+-->
+
+# python.d.plugin
+
+`python.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by Netdata
+3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
+4. It supports any number of data collection **modules**
+5. It allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
+
+## Disclaimer
+
+All third-party libraries should be installed system-wide or in the `python_modules` directory.
+Module configurations are written in YAML, so **PyYAML is required**.
+
+Every configuration file must have one of two formats:
+
+- Configuration for only one job:
+
+```yaml
+update_every : 2 # update frequency
+priority : 20000 # where it is shown on dashboard
+
+other_var1 : bla # variables passed to module
+other_var2 : alb
+```
+
+- Configuration for many jobs (e.g. mysql):
+
+```yaml
+# module defaults:
+update_every : 2
+priority : 20000
+
+local: # job name
+ update_every : 5 # job update frequency
+ other_var1 : some_val # module specific variable
+
+other_job:
+ priority : 5 # job position on dashboard
+ other_var2 : val # module specific variable
+```
+
+`update_every` and `priority` are always optional.
+
+## How to debug a python module
+
+```bash
+# become user netdata
+sudo su -s /bin/bash netdata
+```
+
+Depending on where Netdata was installed, execute one of the following commands to trace the execution of a python module:
+
+```bash
+# execute the plugin in debug mode, for a specific module
+/opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
+```
+
+Where `<module>` is the directory name under <https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin>
+
+**Note**: If you would like to execute a collector in debug mode while it is still being run by Netdata, you can pass the `nolock` CLI option to the above commands.
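+
+For instance, to debug the bundled `example` module while the instance managed by Netdata keeps running (adjust the path to match your install, as above):
+
+```bash
+/usr/libexec/netdata/plugins.d/python.d.plugin example debug trace nolock
+```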
+
+## How to write a new module
+
+See [develop a custom collector in Python](https://github.com/netdata/netdata/edit/master/docs/guides/python-collector.md).
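+
+As a rough sketch of the structure described in that guide (illustrative names, not a drop-in collector), a module is a `<module_name>.chart.py` file defining an `ORDER` list, a `CHARTS` dictionary and a `Service` class whose `get_data()` returns a dict of dimension values:
+
+```python
+# my_module.chart.py - minimal illustration of the python.d module structure
+from random import randint
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+    'random',
+]
+
+CHARTS = {
+    'random': {
+        # [name, title, units, family, context, chart_type]
+        'options': [None, 'A random number', 'number', 'random', 'my_module.random', 'line'],
+        'lines': [
+            ['random1'],
+        ]
+    }
+}
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def get_data(self):
+        # return a dict mapping dimension ids to collected values
+        return {'random1': randint(0, 100)}
+```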
diff --git a/collectors/python.d.plugin/adaptec_raid/Makefile.inc b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
new file mode 100644
index 00000000..716cdb23
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += adaptec_raid/adaptec_raid.chart.py
+dist_pythonconfig_DATA += adaptec_raid/adaptec_raid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += adaptec_raid/README.md adaptec_raid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
new file mode 120000
index 00000000..97a103eb
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -0,0 +1 @@
+integrations/adaptecraid.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
new file mode 100644
index 00000000..1995ad68
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Description: adaptec_raid netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+
+update_every = 5
+
+ORDER = [
+ 'ld_status',
+ 'pd_state',
+ 'pd_smart_warnings',
+ 'pd_temperature',
+]
+
+CHARTS = {
+ 'ld_status': {
+ 'options': [None, 'Status of logical devices (1: Failed or Degraded)', 'bool', 'logical devices',
+ 'adaptec_raid.ld_status', 'line'],
+ 'lines': []
+ },
+ 'pd_state': {
+ 'options': [None, 'State of physical devices (1: not Online)', 'bool', 'physical devices',
+ 'adaptec_raid.pd_state', 'line'],
+ 'lines': []
+ },
+ 'pd_smart_warnings': {
+ 'options': [None, 'S.M.A.R.T warnings', 'count', 'physical devices',
+ 'adaptec_raid.smart_warnings', 'line'],
+ 'lines': []
+ },
+ 'pd_temperature': {
+ 'options': [None, 'Temperature', 'celsius', 'physical devices', 'adaptec_raid.temperature', 'line'],
+ 'lines': []
+ },
+}
+
+SUDO = 'sudo'
+ARCCONF = 'arcconf'
+
+BAD_LD_STATUS = (
+ 'Degraded',
+ 'Failed',
+)
+
+GOOD_PD_STATUS = (
+ 'Online',
+)
+
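+# find_lds() joins the raw arcconf output into a single line, so this pattern can match
+# across what were originally separate output lines.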
+RE_LD = re.compile(
+ r'Logical [dD]evice number\s+([0-9]+).*?'
+ r'Status of [lL]ogical [dD]evice\s+: ([a-zA-Z]+)'
+)
+
+
+def find_lds(d):
+ d = ' '.join(v.strip() for v in d)
+ return [LD(*v) for v in RE_LD.findall(d)]
+
+
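+# Walk the arcconf PD output line by line: start a new PD record at each 'Device #' header and
+# flush it once a section end marker ('NCQ status', 'Device Phy' or an empty line) is reached.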
+def find_pds(d):
+ pds = list()
+ pd = PD()
+
+ for row in d:
+ row = row.strip()
+ if row.startswith('Device #'):
+ pd = PD()
+ pd.id = row.split('#')[-1]
+ elif not pd.id:
+ continue
+
+ if row.startswith('State'):
+ v = row.split()[-1]
+ pd.state = v
+ elif row.startswith('S.M.A.R.T. warnings'):
+ v = row.split()[-1]
+ pd.smart_warnings = v
+ elif row.startswith('Temperature'):
+ v = row.split(':')[-1].split()[0]
+ pd.temperature = v
+ elif row.startswith(('NCQ status', 'Device Phy')) or not row:
+ if pd.id and pd.state and pd.smart_warnings:
+ pds.append(pd)
+ pd = PD()
+
+ return pds
+
+
+class LD:
+ def __init__(self, ld_id, status):
+ self.id = ld_id
+ self.status = status
+
+ def data(self):
+ return {
+ 'ld_{0}_status'.format(self.id): int(self.status in BAD_LD_STATUS)
+ }
+
+
+class PD:
+ def __init__(self):
+ self.id = None
+ self.state = None
+ self.smart_warnings = None
+ self.temperature = None
+
+ def data(self):
+ data = {
+ 'pd_{0}_state'.format(self.id): int(self.state not in GOOD_PD_STATUS),
+ 'pd_{0}_smart_warnings'.format(self.id): self.smart_warnings,
+ }
+ if self.temperature and self.temperature.isdigit():
+ data['pd_{0}_temperature'.format(self.id)] = self.temperature
+
+ return data
+
+
+class Arcconf:
+ def __init__(self, arcconf):
+ self.arcconf = arcconf
+
+ def ld_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'LD']
+
+ def pd_info(self):
+ return [self.arcconf, 'GETCONFIG', '1', 'PD']
+
+
+# TODO: hardcoded sudo...
+class SudoArcconf:
+ def __init__(self, arcconf, sudo):
+ self.arcconf = Arcconf(arcconf)
+ self.sudo = sudo
+
+ def ld_info(self):
+ return [self.sudo, '-n'] + self.arcconf.ld_info()
+
+ def pd_info(self):
+ return [self.sudo, '-n'] + self.arcconf.pd_info()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.use_sudo = self.configuration.get('use_sudo', True)
+ self.arcconf = None
+
+ def execute(self, command, stderr=False):
+ return self._get_raw_data(command=command, stderr=stderr)
+
+ def check(self):
+ arcconf = find_binary(ARCCONF)
+ if not arcconf:
+ self.error('can\'t locate "{0}" binary'.format(ARCCONF))
+ return False
+
+ sudo = find_binary(SUDO)
+ if self.use_sudo:
+ if not sudo:
+ self.error('can\'t locate "{0}" binary'.format(SUDO))
+ return False
+ err = self.execute([sudo, '-n', '-v'], True)
+ if err:
+ self.error(' '.join(err))
+ return False
+
+ if self.use_sudo:
+ self.arcconf = SudoArcconf(arcconf, sudo)
+ else:
+ self.arcconf = Arcconf(arcconf)
+
+ lds = self.get_lds()
+ if not lds:
+ return False
+
+ self.debug('discovered logical devices ids: {0}'.format([ld.id for ld in lds]))
+
+ pds = self.get_pds()
+ if not pds:
+ return False
+
+ self.debug('discovered physical devices ids: {0}'.format([pd.id for pd in pds]))
+
+ self.update_charts(lds, pds)
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ for ld in self.get_lds():
+ data.update(ld.data())
+
+ for pd in self.get_pds():
+ data.update(pd.data())
+
+ return data
+
+ def get_lds(self):
+ raw_lds = self.execute(self.arcconf.ld_info())
+ if not raw_lds:
+ return None
+
+ lds = find_lds(raw_lds)
+ if not lds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.ld_info())))
+ self.debug('output: {0}'.format(raw_lds))
+ return None
+ return lds
+
+ def get_pds(self):
+ raw_pds = self.execute(self.arcconf.pd_info())
+ if not raw_pds:
+ return None
+
+ pds = find_pds(raw_pds)
+ if not pds:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.pd_info())))
+ self.debug('output: {0}'.format(raw_pds))
+ return None
+ return pds
+
+ def update_charts(self, lds, pds):
+ charts = self.definitions
+ for ld in lds:
+ dim = ['ld_{0}_status'.format(ld.id), 'ld {0}'.format(ld.id)]
+ charts['ld_status']['lines'].append(dim)
+
+ for pd in pds:
+ dim = ['pd_{0}_state'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_state']['lines'].append(dim)
+
+ dim = ['pd_{0}_smart_warnings'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_smart_warnings']['lines'].append(dim)
+
+ dim = ['pd_{0}_temperature'.format(pd.id), 'pd {0}'.format(pd.id)]
+ charts['pd_temperature']['lines'].append(dim)
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
new file mode 100644
index 00000000..fa462ec8
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
@@ -0,0 +1,53 @@
+# netdata python.d.plugin configuration for adaptec raid
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# ----------------------------------------------------------------------
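+#
+# An illustrative, commented-out job (a sketch, not a recommendation). `use_sudo`
+# is a module-specific option read by adaptec_raid.chart.py and defaults to yes:
+#
+# my_adaptec:
+#  name: 'local'
+#  update_every: 5
+#  use_sudo: yes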
diff --git a/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md b/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md
new file mode 100644
index 00000000..13d22ba5
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md
@@ -0,0 +1,204 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/adaptec_raid/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/adaptec_raid/metadata.yaml"
+sidebar_label: "AdaptecRAID"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AdaptecRAID
+
+
+<img src="https://netdata.cloud/img/adaptec.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: adaptec_raid
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives.
+
+
+It uses the `arcconf` command line utility (from Adaptec) to monitor your RAID controller.
+
+Executed commands:
+ - `sudo -n arcconf GETCONFIG 1 LD`
+ - `sudo -n arcconf GETCONFIG 1 PD`
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.
+
+### Default Behavior
+
+#### Auto-Detection
+
+After all the permissions are satisfied, netdata should be able to execute commands via the arcconf command line utility
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per AdaptecRAID instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| adaptec_raid.ld_status | a dimension per logical device | bool |
+| adaptec_raid.pd_state | a dimension per physical device | bool |
+| adaptec_raid.smart_warnings | a dimension per physical device | count |
+| adaptec_raid.temperature | a dimension per physical device | celsius |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ adaptec_raid_ld_status ](https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf) | adaptec_raid.ld_status | logical device status is failed or degraded |
+| [ adaptec_raid_pd_state ](https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf) | adaptec_raid.pd_state | physical device state is not online |
+
+
+## Setup
+
+### Prerequisites
+
+#### Grant permissions for netdata to run arcconf as sudoer
+
+The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.
+
+Add the following to your `/etc/sudoers` file (`which arcconf` shows the full path to the binary):
+
+```bash
+netdata ALL=(root) NOPASSWD: /path/to/arcconf
+```
+
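+To test the rule (an optional sanity check, using the same placeholder path as above), run the command the collector uses, as the netdata user:
+
+```bash
+sudo -u netdata sudo -n /path/to/arcconf GETCONFIG 1 LD
+```
+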
+
+#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)
+
+The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute arcconf using sudo.
+
+As root user, do the following:
+
+```bash
+mkdir /etc/systemd/system/netdata.service.d
+echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+systemctl daemon-reload
+systemctl restart netdata.service
+```
+
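+After restarting, you can confirm the override took effect (an optional check) with:
+
+```bash
+systemctl show -p CapabilityBoundingSet netdata.service
+```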
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/adaptec_raid.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/adaptec_raid.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration per job
+
+```yaml
+job_name:
+ name: my_job_name
+ update_every: 1 # the JOB's data collection frequency
+ priority: 60000 # the JOB's order on the dashboard
+ penalty: yes # the JOB's penalty
+ autodetection_retry: 0 # the JOB's re-check interval in seconds
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `adaptec_raid` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin adaptec_raid debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/adaptec_raid/metadata.yaml b/collectors/python.d.plugin/adaptec_raid/metadata.yaml
new file mode 100644
index 00000000..c69baff4
--- /dev/null
+++ b/collectors/python.d.plugin/adaptec_raid/metadata.yaml
@@ -0,0 +1,167 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: adaptec_raid
+ monitored_instance:
+ name: AdaptecRAID
+ link: "https://www.microchip.com/en-us/products/storage"
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: "adaptec.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - storage
+ - raid-controller
+ - manage-disks
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives.
+ method_description: |
+ It uses the `arcconf` command line utility (from Adaptec) to monitor your RAID controller.
+
+ Executed commands:
+ - `sudo -n arcconf GETCONFIG 1 LD`
+ - `sudo -n arcconf GETCONFIG 1 PD`
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: "The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password."
+ default_behavior:
+ auto_detection:
+ description: "After all the permissions are satisfied, netdata should be to execute commands via the arcconf command line utility"
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Grant permissions for netdata to run arcconf as sudoer
+ description: |
+ The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.
+
+ Add the following to your `/etc/sudoers` file (`which arcconf` shows the full path to the binary):
+
+ ```bash
+ netdata ALL=(root) NOPASSWD: /path/to/arcconf
+ ```
+ - title: Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)
+ description: |
+ The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute arcconf using sudo.
+
+ As root user, do the following:
+
+ ```bash
+ mkdir /etc/systemd/system/netdata.service.d
+ echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+ systemctl daemon-reload
+ systemctl restart netdata.service
+ ```
+ configuration:
+ file:
+ name: "python.d/adaptec_raid.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration per job
+ config: |
+ job_name:
+ name: my_job_name
+ update_every: 1 # the JOB's data collection frequency
+ priority: 60000 # the JOB's order on the dashboard
+ penalty: yes # the JOB's penalty
+ autodetection_retry: 0 # the JOB's re-check interval in seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: adaptec_raid_ld_status
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf
+ metric: adaptec_raid.ld_status
+ info: logical device status is failed or degraded
+ - name: adaptec_raid_pd_state
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf
+ metric: adaptec_raid.pd_state
+ info: physical device state is not online
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: adaptec_raid.ld_status
+ description: "Status of logical devices (1: Failed or Degraded)"
+ unit: "bool"
+ chart_type: line
+ dimensions:
+ - name: a dimension per logical device
+ - name: adaptec_raid.pd_state
+ description: "State of physical devices (1: not Online)"
+ unit: "bool"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical device
+ - name: adaptec_raid.smart_warnings
+ description: S.M.A.R.T warnings
+ unit: "count"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical device
+ - name: adaptec_raid.temperature
+ description: Temperature
+ unit: "celsius"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical device
diff --git a/collectors/python.d.plugin/alarms/Makefile.inc b/collectors/python.d.plugin/alarms/Makefile.inc
new file mode 100644
index 00000000..c2de1172
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += alarms/alarms.chart.py
+dist_pythonconfig_DATA += alarms/alarms.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += alarms/README.md alarms/Makefile.inc
+
diff --git a/collectors/python.d.plugin/alarms/README.md b/collectors/python.d.plugin/alarms/README.md
new file mode 120000
index 00000000..85759ae6
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/README.md
@@ -0,0 +1 @@
+integrations/netdata_agent_alarms.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/alarms/alarms.chart.py b/collectors/python.d.plugin/alarms/alarms.chart.py
new file mode 100644
index 00000000..d1942735
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/alarms.chart.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Description: alarms netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+update_every = 10
+disabled_by_default = True
+
+
+def charts_template(sm, alarm_status_chart_type='line'):
+ order = [
+ 'alarms',
+ 'values'
+ ]
+
+ mappings = ', '.join(['{0}={1}'.format(k, v) for k, v in sm.items()])
+ charts = {
+ 'alarms': {
+ 'options': [None, 'Alarms ({0})'.format(mappings), 'status', 'status', 'alarms.status', alarm_status_chart_type],
+ 'lines': [],
+ 'variables': [
+ ['alarms_num'],
+ ]
+ },
+ 'values': {
+ 'options': [None, 'Alarm Values', 'value', 'value', 'alarms.value', 'line'],
+ 'lines': [],
+ }
+ }
+ return order, charts
+
+
+DEFAULT_STATUS_MAP = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}
+DEFAULT_URL = 'http://127.0.0.1:19999/api/v1/alarms?all'
+DEFAULT_COLLECT_ALARM_VALUES = False
+DEFAULT_ALARM_STATUS_CHART_TYPE = 'line'
+DEFAULT_ALARM_CONTAINS_WORDS = ''
+DEFAULT_ALARM_EXCLUDES_WORDS = ''
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.sm = self.configuration.get('status_map', DEFAULT_STATUS_MAP)
+ self.alarm_status_chart_type = self.configuration.get('alarm_status_chart_type', DEFAULT_ALARM_STATUS_CHART_TYPE)
+ self.order, self.definitions = charts_template(self.sm, self.alarm_status_chart_type)
+ self.url = self.configuration.get('url', DEFAULT_URL)
+ self.collect_alarm_values = bool(self.configuration.get('collect_alarm_values', DEFAULT_COLLECT_ALARM_VALUES))
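+ # track which dimensions have been added to each chart so they can be added and removed dynamically as alarms come and go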
+ self.collected_dims = {'alarms': set(), 'values': set()}
+ self.alarm_contains_words = self.configuration.get('alarm_contains_words', DEFAULT_ALARM_CONTAINS_WORDS)
+ self.alarm_contains_words_list = [alarm_contains_word.lstrip(' ').rstrip(' ') for alarm_contains_word in self.alarm_contains_words.split(',')]
+ self.alarm_excludes_words = self.configuration.get('alarm_excludes_words', DEFAULT_ALARM_EXCLUDES_WORDS)
+ self.alarm_excludes_words_list = [alarm_excludes_word.lstrip(' ').rstrip(' ') for alarm_excludes_word in self.alarm_excludes_words.split(',')]
+
+ def _get_data(self):
+ raw_data = self._get_raw_data()
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+ alarms = raw_data.get('alarms', {})
+ # keep only alarms whose name contains at least one of the configured 'alarm_contains_words'
+ if self.alarm_contains_words != '':
+ alarms = {alarm_name: alarms[alarm_name] for alarm_name in alarms
+ if any(word in alarm_name for word in self.alarm_contains_words_list)}
+ # drop any alarm whose name contains one of the configured 'alarm_excludes_words'
+ if self.alarm_excludes_words != '':
+ alarms = {alarm_name: alarms[alarm_name] for alarm_name in alarms
+ if not any(word in alarm_name for word in self.alarm_excludes_words_list)}
+
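+ # map each alarm's textual status to its configured numeric value (by default CLEAR=0, WARNING=1, CRITICAL=2)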
+ data = {a: self.sm[alarms[a]['status']] for a in alarms if alarms[a]['status'] in self.sm}
+ self.update_charts('alarms', data)
+ data['alarms_num'] = len(data)
+
+ if self.collect_alarm_values:
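+ # values are scaled by 100 with divisor=100 on the dimension, so two decimal places survive netdata's integer storage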
+ data_values = {'{}_value'.format(a): alarms[a]['value'] * 100 for a in alarms if 'value' in alarms[a] and alarms[a]['value'] is not None}
+ self.update_charts('values', data_values, divisor=100)
+ data.update(data_values)
+
+ return data
+
+ def update_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
+ if not self.charts:
+ return
+
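+ # add a dimension the first time an alarm appears in the collected data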
+ for dim in data:
+ if dim not in self.collected_dims[chart]:
+ self.collected_dims[chart].add(dim)
+ self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
+
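+ # drop dimensions for alarms that are no longer present in the collected data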
+ for dim in list(self.collected_dims[chart]):
+ if dim not in data:
+ self.collected_dims[chart].remove(dim)
+ self.charts[chart].del_dimension(dim, hide=False)
diff --git a/collectors/python.d.plugin/alarms/alarms.conf b/collectors/python.d.plugin/alarms/alarms.conf
new file mode 100644
index 00000000..06d76c3b
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/alarms.conf
@@ -0,0 +1,60 @@
+# netdata python.d.plugin configuration for the alarms collector
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# what url to pull data from
+local:
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ # define how to map alarm status to numbers for the chart
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
+ # set to true to include a chart with calculated alarm values over time
+ collect_alarm_values: false
+ # define the type of chart for plotting status over time e.g. 'line' or 'stacked'
+ alarm_status_chart_type: 'line'
+ # a "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only
+ # alarms with "cpu" or "load" in alarm name. Default includes all.
+ alarm_contains_words: ''
+ # a "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude
+ # all alarms with "cpu" or "load" in alarm name. Default excludes None.
+ alarm_excludes_words: ''
diff --git a/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md b/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md
new file mode 100644
index 00000000..9fb69878
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md
@@ -0,0 +1,201 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/alarms/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/alarms/metadata.yaml"
+sidebar_label: "Netdata Agent alarms"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Other"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Netdata Agent alarms
+
+Plugin: python.d.plugin
+Module: alarms
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector creates an 'Alarms' menu with one line plot of `alarms.status`.
+
+
+Alarm status is read from the Netdata agent REST API [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. Also, by default all alarms produced will be monitored.
+
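+As a hedged illustration (not the collector's exact code), the status mapping works roughly like this:
+
+```python
+# Rough sketch of the mapping the collector applies; not its actual implementation.
+import requests
+
+status_map = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}
+alarms = requests.get('http://127.0.0.1:19999/api/v1/alarms?all').json().get('alarms', {})
+
+# one numeric value per alarm, e.g. {'10min_cpu_usage': 0, ...} (the alarm name here is illustrative)
+data = {name: status_map[alarm['status']] for name, alarm in alarms.items() if alarm['status'] in status_map}
+print(data)
+```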
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Netdata Agent alarms instance
+
+These metrics refer to the entire monitored application.
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| alarms.status | a dimension per alarm representing the latest status of the alarm. | status |
+| alarms.values | a dimension per alarm representing the latest collected value of the alarm. | value |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/alarms.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/alarms.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| url | Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes |
+| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {"CLEAR": 0, "WARNING": 1, "CRITICAL": 2} | yes |
+| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes |
+| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | yes |
+| alarm_contains_words | A "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all. | | yes |
+| alarm_excludes_words | A "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None. | | yes |
+| update_every | Sets the default data collection frequency. | 10 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+
+```
+##### Advanced
+
+An advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.
+"ML" job will collect status and values for all alarms with "ml_" in the name. Default job will collect status for all other alarms.
+
+
+<details><summary>Config</summary>
+
+```yaml
+ML:
+ update_every: 5
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
+ collect_alarm_values: true
+ alarm_status_chart_type: 'stacked'
+ alarm_contains_words: 'ml_'
+
+Default:
+ update_every: 5
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
+ collect_alarm_values: false
+ alarm_status_chart_type: 'stacked'
+ alarm_excludes_words: 'ml_'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `alarms` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin alarms debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/alarms/metadata.yaml b/collectors/python.d.plugin/alarms/metadata.yaml
new file mode 100644
index 00000000..30a89778
--- /dev/null
+++ b/collectors/python.d.plugin/alarms/metadata.yaml
@@ -0,0 +1,177 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: alarms
+ monitored_instance:
+ name: Netdata Agent alarms
+ link: https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/alarms/README.md
+ categories:
+ - data-collection.other
+ icon_filename: ""
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - alarms
+ - netdata
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector creates an 'Alarms' menu with one line plot of `alarms.status`.
+ method_description: |
+ Alarm status is read from the Netdata agent REST API [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ It discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. Also, by default all alarms produced will be monitored.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: python.d/alarms.conf
+ description: ""
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: url
+ description: Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent.
+ default_value: http://127.0.0.1:19999/api/v1/alarms?all
+ required: true
+ - name: status_map
+ description: Mapping of alarm status to integer number that will be the metric value collected.
+ default_value: '{"CLEAR": 0, "WARNING": 1, "CRITICAL": 2}'
+ required: true
+ - name: collect_alarm_values
+ description: set to true to include a chart with calculated alarm values over time.
+ default_value: false
+ required: true
+ - name: alarm_status_chart_type
+ description: define the type of chart for plotting status over time e.g. 'line' or 'stacked'.
+ default_value: "line"
+ required: true
+ - name: alarm_contains_words
+ description: >
+ A "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all.
+ default_value: ""
+ required: true
+ - name: alarm_excludes_words
+ description: >
+ A "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None.
+ default_value: ""
+ required: true
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 10
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: Config
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ - name: Advanced
+ folding:
+ enabled: true
+ description: |
+ An advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.
+ "ML" job will collect status and values for all alarms with "ml_" in the name. Default job will collect status for all other alarms.
+ config: |
+ ML:
+ update_every: 5
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
+ collect_alarm_values: true
+ alarm_status_chart_type: 'stacked'
+ alarm_contains_words: 'ml_'
+
+ Default:
+ update_every: 5
+ url: 'http://127.0.0.1:19999/api/v1/alarms?all'
+ status_map:
+ CLEAR: 0
+ WARNING: 1
+ CRITICAL: 2
+ collect_alarm_values: false
+ alarm_status_chart_type: 'stacked'
+ alarm_excludes_words: 'ml_'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: |
+ These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: alarms.status
+ description: Alarms ({status mapping})
+ unit: "status"
+ chart_type: line
+ dimensions:
+ - name: a dimension per alarm representing the latest status of the alarm.
+ - name: alarms.values
+ description: Alarm Values
+ unit: "value"
+ chart_type: line
+ dimensions:
+ - name: a dimension per alarm representing the latest collected value of the alarm.
diff --git a/collectors/python.d.plugin/am2320/Makefile.inc b/collectors/python.d.plugin/am2320/Makefile.inc
new file mode 100644
index 00000000..48e5a889
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/Makefile.inc
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# install these files
+dist_python_DATA += am2320/am2320.chart.py
+dist_pythonconfig_DATA += am2320/am2320.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += am2320/README.md am2320/Makefile.inc
diff --git a/collectors/python.d.plugin/am2320/README.md b/collectors/python.d.plugin/am2320/README.md
new file mode 120000
index 00000000..0bc5ea90
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/README.md
@@ -0,0 +1 @@
+integrations/am2320.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/am2320/am2320.chart.py b/collectors/python.d.plugin/am2320/am2320.chart.py
new file mode 100644
index 00000000..8e66544b
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/am2320.chart.py
@@ -0,0 +1,68 @@
+# _*_ coding: utf-8 _*_
+# Description: AM2320 netdata module
+# Author: tommybuck
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import board
+ import busio
+ import adafruit_am2320
+
+ HAS_AM2320 = True
+except ImportError:
+ HAS_AM2320 = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+ 'temperature',
+ 'humidity',
+]
+
+CHARTS = {
+ 'temperature': {
+ 'options': [None, 'Temperature', 'celsius', 'temperature', 'am2320.temperature', 'line'],
+ 'lines': [
+ ['temperature']
+ ]
+ },
+ 'humidity': {
+ 'options': [None, 'Relative Humidity', 'percentage', 'humidity', 'am2320.humidity', 'line'],
+ 'lines': [
+ ['humidity']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.am = None
+
+ def check(self):
+ if not HAS_AM2320:
+ self.error("Could not find the adafruit-circuitpython-am2320 package.")
+ return False
+
+ try:
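+ # create the shared I2C bus and attach the AM2320 driver (a ValueError here means the bus could not be set up)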
+ i2c = busio.I2C(board.SCL, board.SDA)
+ self.am = adafruit_am2320.AM2320(i2c)
+ except ValueError as error:
+ self.error("error on creating I2C shared bus : {0}".format(error))
+ return False
+
+ return True
+
+ def get_data(self):
+ try:
+ return {
+ 'temperature': self.am.temperature,
+ 'humidity': self.am.relative_humidity,
+ }
+
+ except (OSError, RuntimeError) as error:
+ self.error(error)
+ return None
diff --git a/collectors/python.d.plugin/am2320/am2320.conf b/collectors/python.d.plugin/am2320/am2320.conf
new file mode 100644
index 00000000..c6b9885f
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/am2320.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for am2320 temperature/humidity sensor
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, am2320 also supports the following:
+#
+# - none
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/am2320/integrations/am2320.md b/collectors/python.d.plugin/am2320/integrations/am2320.md
new file mode 100644
index 00000000..72b351eb
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/integrations/am2320.md
@@ -0,0 +1,181 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/am2320/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/am2320/metadata.yaml"
+sidebar_label: "AM2320"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AM2320
+
+
+<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: am2320
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors AM2320 sensor metrics about temperature and humidity.
+
+It retrieves temperature and humidity values by contacting an AM2320 sensor over i2c.
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+Assuming prerequisites are met, the collector will try to connect to the sensor via i2c
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per AM2320 instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| am2320.temperature | temperature | celsius |
+| am2320.humidity | humidity | percentage |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Sensor connection to a Raspberry Pi
+
+Connect the am2320 to the Raspberry Pi I2C pins
+
+Raspberry Pi 3B/4 Pins:
+
+- Board 3.3V (pin 1) to sensor VIN (pin 1)
+- Board SDA (pin 3) to sensor SDA (pin 2)
+- Board GND (pin 6) to sensor GND (pin 3)
+- Board SCL (pin 5) to sensor SCL (pin 4)
+
+You may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.
+
+
+#### Software requirements
+
+Install the Adafruit Circuit Python AM2320 library:
+
+`sudo pip3 install adafruit-circuitpython-am2320`
+
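+Optionally, you can sanity-check the wiring and the library with a short standalone read, run with `python3` on the Pi. This is a hedged sketch that simply mirrors what `am2320.chart.py` does; it is not part of the collector:
+
+```python
+# Read the AM2320 once, outside of Netdata, to verify wiring and the library.
+import board
+import busio
+import adafruit_am2320
+
+i2c = busio.I2C(board.SCL, board.SDA)  # shared I2C bus
+sensor = adafruit_am2320.AM2320(i2c)
+
+print('temperature (celsius):', sensor.temperature)
+print('relative humidity (%):', sensor.relative_humidity)
+```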
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/am2320.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/am2320.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+
+##### Local sensor
+
+A basic JOB configuration
+
+```yaml
+local_sensor:
+ name: 'Local AM2320'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin am2320 debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/am2320/metadata.yaml b/collectors/python.d.plugin/am2320/metadata.yaml
new file mode 100644
index 00000000..c85cd5f2
--- /dev/null
+++ b/collectors/python.d.plugin/am2320/metadata.yaml
@@ -0,0 +1,135 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: am2320
+ monitored_instance:
+ name: AM2320
+ link: 'https://learn.adafruit.com/adafruit-am2320-temperature-humidity-i2c-sensor/overview'
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ icon_filename: 'microchip.svg'
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ''
+ keywords:
+ - temperature
+ - am2320
+ - sensor
+ - humidity
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: 'This collector monitors AM2320 sensor metrics about temperature and humidity.'
+ method_description: 'It retrieves temperature and humidity values by contacting an AM2320 sensor over i2c.'
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ''
+ default_behavior:
+ auto_detection:
+ description: 'Assuming prerequisites are met, the collector will try to connect to the sensor via i2c'
+ limits:
+ description: ''
+ performance_impact:
+ description: ''
+ setup:
+ prerequisites:
+ list:
+ - title: 'Sensor connection to a Raspberry Pi'
+ description: |
+ Connect the am2320 to the Raspberry Pi I2C pins
+
+ Raspberry Pi 3B/4 Pins:
+
+ - Board 3.3V (pin 1) to sensor VIN (pin 1)
+ - Board SDA (pin 3) to sensor SDA (pin 2)
+ - Board GND (pin 6) to sensor GND (pin 3)
+ - Board SCL (pin 5) to sensor SCL (pin 4)
+
+ You may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.
+ - title: 'Software requirements'
+ description: |
+ Install the Adafruit Circuit Python AM2320 library:
+
+ `sudo pip3 install adafruit-circuitpython-am2320`
+ configuration:
+ file:
+ name: python.d/am2320.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ''
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Local sensor
+ description: A basic JOB configuration
+ config: |
+ local_sensor:
+ name: 'Local AM2320'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: am2320.temperature
+ description: Temperature
+ unit: "celsius"
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: am2320.humidity
+ description: Relative Humidity
+ unit: "percentage"
+ chart_type: line
+ dimensions:
+ - name: humidity
diff --git a/collectors/python.d.plugin/anomalies/Makefile.inc b/collectors/python.d.plugin/anomalies/Makefile.inc
new file mode 100644
index 00000000..94937b36
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += anomalies/anomalies.chart.py
+dist_pythonconfig_DATA += anomalies/anomalies.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += anomalies/README.md anomalies/Makefile.inc
+
diff --git a/collectors/python.d.plugin/anomalies/README.md b/collectors/python.d.plugin/anomalies/README.md
new file mode 100644
index 00000000..80f50537
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/README.md
@@ -0,0 +1,248 @@
+<!--
+title: "Anomaly detection with Netdata"
+description: "Use ML-driven anomaly detection to narrow your focus to only affected metrics and services/processes on your node to shorten root cause analysis."
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/anomalies/README.md"
+sidebar_url: "Anomalies"
+sidebar_label: "anomalies"
+learn_status: "Published"
+learn_rel_path: "Integrations/Monitor/Anything"
+-->
+
+# Anomaly detection with Netdata
+
+**Note**: Check out the [Netdata Anomaly Advisor](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.md) for a more native anomaly detection experience within Netdata.
+
+This collector uses the Python [PyOD](https://pyod.readthedocs.io/en/latest/index.html) library to perform unsupervised [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) on your Netdata charts and/or dimensions.
+
+Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return an anomaly probability and anomaly flag for each chart or custom model you define. This computation consists of a **train** function that runs every `train_n_secs` to train the ML models to learn what 'normal' typically looks like on your node. At each iteration there is also a **predict** function that uses the latest trained models and most recent metrics to produce an anomaly probability and anomaly flag for each chart or custom model you define.
+
+> As this is a somewhat unique collector and involves often subjective concepts like anomalies and anomaly probabilities, we would love to hear any feedback on it from the community. Please let us know on the [community forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767) or drop us a note at [analytics-ml-team@netdata.cloud](mailto:analytics-ml-team@netdata.cloud) for any and all feedback, both positive and negative. This sort of feedback is priceless to help us make complex features more useful.
+
+## Charts
+
+Two charts are produced:
+
+- **Anomaly Probability** (`anomalies.probability`): This chart shows the probability that the latest observed data is anomalous based on the trained model for that chart (using the [`predict_proba()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict_proba) method of the trained PyOD model).
+- **Anomaly** (`anomalies.anomaly`): This chart shows a `1` or `0` prediction of whether the latest observed data is considered anomalous or not, based on the trained model (using the [`predict()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict) method of the trained PyOD model). A minimal sketch of both calls follows below.
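+
+The sketch below is a rough, hedged illustration of those two PyOD calls on synthetic data; it is not the collector's actual code, which trains one model per chart on preprocessed feature vectors:
+
+```python
+# Minimal illustration of predict_proba() and predict(); not the collector's code.
+import numpy as np
+from pyod.models.pca import PCA  # the default `model: 'pca'`
+
+X_train = np.random.randn(1000, 6)  # stand-in for a chart's preprocessed training matrix
+X_latest = np.random.randn(1, 6)    # stand-in for the most recent feature vector
+
+clf = PCA(contamination=0.001)
+clf.fit(X_train)
+
+probability = clf.predict_proba(X_latest)[0][1]  # feeds anomalies.probability
+anomaly_flag = clf.predict(X_latest)[0]          # 0 or 1, feeds anomalies.anomaly
+print(probability, anomaly_flag)
+```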
+
+Below is an example of the charts produced by this collector and how they might look when things are 'normal' on the node. The anomaly probabilities tend to bounce randomly around a typically low probability range; every now and then one or two might jump or drift outside of this range and show up as anomalies on the anomaly chart.
+
+![netdata-anomalies-collector-normal](https://user-images.githubusercontent.com/2178292/100663699-99755000-334e-11eb-922f-0c41a0176484.jpg)
+
+If we then go onto the system and run a command like `stress-ng --all 2` to create some [stress](https://wiki.ubuntu.com/Kernel/Reference/stress-ng), we see some charts begin to have anomaly probabilities that jump outside the typical range. When the anomaly probabilities change enough, we will start seeing anomalies being flagged on the `anomalies.anomaly` chart. The idea is that these charts are the most anomalous right now so could be a good place to start your troubleshooting.
+
+![netdata-anomalies-collector-abnormal](https://user-images.githubusercontent.com/2178292/100663710-9bd7aa00-334e-11eb-9d14-76fda73bc309.jpg)
+
+Then, as the issue passes, the anomaly probabilities should settle back down into their 'normal' range again.
+
+![netdata-anomalies-collector-normal-again](https://user-images.githubusercontent.com/2178292/100666681-481a9000-3351-11eb-9979-64728ee2dfb6.jpg)
+
+## Requirements
+
+- This collector will only work with Python 3 and requires the packages below to be installed.
+- Typically you will not need to do this, but, if needed, you can ensure Python 3 is used by adding the line below to the `[plugin:python.d]` section of `netdata.conf`:
+
+```conf
+[plugin:python.d]
+ # update every = 1
+ command options = -ppython3
+```
+
+Install the required Python libraries.
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# install required packages for the netdata user
+pip3 install --user netdata-pandas==0.0.38 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3
+```
+
+## Configuration
+
+Install the Python requirements above, enable the collector and restart Netdata.
+
+```bash
+cd /etc/netdata/
+sudo ./edit-config python.d.conf
+# Set `anomalies: no` to `anomalies: yes`
+sudo systemctl restart netdata
+```
+
+The configuration for the anomalies collector defines how it will behave on your system and might take some experimentation over time to set it optimally for your node. Out of the box, the config comes with some [sane defaults](https://www.netdata.cloud/blog/redefining-monitoring-netdata/) to get you started that try to balance the flexibility and power of the ML models with the goal of being as cheap as possible in terms of cost on the node's resources.
+
+_**Note**: If you are unsure about any of the below configuration options then it's best to just ignore all this and leave the `anomalies.conf` file alone to begin with. Then you can return to it later if you would like to tune things a bit more once the collector is running for a while and you have a feeling for its performance on your node._
+
+Edit the `python.d/anomalies.conf` configuration file using `edit-config` from your agent's [config
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/anomalies.conf
+```
+
+The default configuration should look something like this. Here you can see each parameter (with sane defaults) and some information about each one and what it does.
+
+```conf
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# Pull data from local Netdata node.
+anomalies:
+ name: 'Anomalies'
+
+ # Host to pull data from.
+ host: '127.0.0.1:19999'
+
+ # Username and Password for Netdata if using basic auth.
+ # username: '???'
+ # password: '???'
+
+ # Use http or https to pull data
+ protocol: 'http'
+
+ # SSL verify parameter for requests.get() calls
+ tls_verify: true
+
+ # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if you would like to exclude some specific charts.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: 'system.uptime,system.entropy'
+
+ # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
+ # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
+ model: 'pca'
+
+ # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
+ train_max_n: 100000
+
+ # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes).
+ # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
+ train_every_n: 1800
+
+ # The length of the window of data to train on (14400 = last 4 hours).
+ train_n_secs: 14400
+
+ # How many prediction steps after a train event to just use previous prediction value for.
+ # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
+ train_no_prediction_n: 10
+
+ # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
+ # Start of training data for initial model.
+ # initial_train_data_after: 1604578857
+
+ # End of training data for initial model.
+ # initial_train_data_before: 1604593257
+
+ # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
+ offset_n_secs: 0
+
+ # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
+ lags_n: 5
+
+ # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
+ smooth_n: 3
+
+ # How many differences to take in preprocessing your data.
+ # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
+ # diffs_n=0 would mean training models on the raw values of each dimension.
+ # diffs_n=1 means everything is done in terms of differences.
+ diffs_n: 1
+
+ # What is the typical proportion of anomalies in your data on average?
+ # This parameter can control the sensitivity of your models to anomalies.
+ # Some discussion here: https://github.com/yzhao062/pyod/issues/144
+ contamination: 0.001
+
+ # Set to true to include an "average_prob" dimension on anomalies probability chart which is
+ # just the average of all anomaly probabilities at each time step
+ include_average_prob: true
+
+ # Define any custom models you would like to create anomaly probabilities for; some examples below show how.
+ # The example below creates two custom models: one to run anomaly detection on user and system cpu for our demo servers,
+ # and one on the cpu and mem apps metrics for the python.d.plugin.
+ # custom_models:
+ # - name: 'demos_cpu'
+ # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+ # - name: 'apps_python_d_plugin'
+ # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
+
+ # Set to true to normalize, using min-max standardization, features used for the custom models.
+ # Useful if your custom models contain dimensions on very different scales and the model you use does
+ # not internally do its own normalization. Usually best to leave as false.
+ # custom_models_normalize: false
+```
+
+## Custom models
+
+In the `anomalies.conf` file you can also define some "custom models" which you can use to group one or more metrics into a single model much like is done by default for the charts you specify. This is useful if you have a handful of metrics that exist in different charts but perhaps are related to the same underlying thing you would like to perform anomaly detection on, for example a specific app or user.
+
+To define a custom model you would include configuration like below in `anomalies.conf`. By default there should already be some commented out examples in there.
+
+`name` is a name you give your custom model, this is what will appear alongside any other specified charts in the `anomalies.probability` and `anomalies.anomaly` charts. `dimensions` is a string of metrics you want to include in your custom model. By default the [netdata-pandas](https://github.com/netdata/netdata-pandas) library used to pull the data from Netdata uses a "chart.a|dim.1" type of naming convention in the pandas columns it returns, hence the `dimensions` string should look like "chart.name|dimension.name,chart.name|dimension.name". The examples below hopefully make this clear.
+
+```yaml
+custom_models:
+ # a model for anomaly detection on the netdata user in terms of cpu, mem, threads, processes and sockets.
+ - name: 'user_netdata'
+ dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
+ # a model for anomaly detection on the netdata python.d.plugin app in terms of cpu, mem, threads, processes and sockets.
+ - name: 'apps_python_d_plugin'
+ dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
+
+custom_models_normalize: false
+```
+
+## Troubleshooting
+
+To see any relevant log messages, you can use a command like the one below.
+
+```bash
+grep 'anomalies' /var/log/netdata/error.log
+```
+
+If you would like more detail, you can log in as the `netdata` user and run the collector in debug mode:
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# run collector in debug using `nolock` option if netdata is already running the collector itself.
+/usr/libexec/netdata/plugins.d/python.d.plugin anomalies debug trace nolock
+```
+
+## Deepdive tutorial
+
+If you would like to go deeper on what exactly the anomalies collector is doing under the hood, then check out this [deepdive tutorial](https://github.com/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb) in our community repo where you can play around with some data from our demo servers (or your own if it's accessible to you) and work through the calculations step by step.
+
+(Note: as it's a Jupyter Notebook, it might render a little prettier on [nbviewer](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb))
+
+## Notes
+
+- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the required data for each chart.
+- Python 3 is also required for the underlying ML libraries of [numba](https://pypi.org/project/numba/), [scikit-learn](https://pypi.org/project/scikit-learn/), and [PyOD](https://pypi.org/project/pyod/).
+- It may take a few hours or so (depending on your choice of `train_n_secs`) for the collector to 'settle' into its typical behaviour in terms of the trained models and probabilities you will see in the normal running of your node.
+- As this collector does most of the work in Python itself, with [PyOD](https://pyod.readthedocs.io/en/latest/) leveraging [numba](https://numba.pydata.org/) under the hood, you may want to try it out first on a test or development system to get a sense of its performance characteristics on a node similar to where you would like to use it.
+- `lags_n`, `smooth_n`, and `diffs_n` together define the preprocessing done to the raw data before models are trained and before each prediction. This essentially creates a [feature vector](https://en.wikipedia.org/wiki/Feature_(machine_learning)#:~:text=In%20pattern%20recognition%20and%20machine,features%20that%20represent%20some%20object.&text=Feature%20vectors%20are%20often%20combined,score%20for%20making%20a%20prediction.) for each chart model (or each custom model). The default settings for these parameters aim to create a rolling matrix of recent smoothed [differenced](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing) values for each chart. The aim of the model then is to score how unusual this 'matrix' of features is for each chart based on what it has learned as 'normal' from the training data. So as opposed to just looking at the single most recent value of a dimension and considering how strange it is, this approach looks at a recent smoothed window of all dimensions for a chart (or dimensions in a custom model) and asks how unusual the data as a whole looks. This should be more flexible in capturing a wider range of [anomaly types](https://andrewm4894.com/2020/10/19/different-types-of-time-series-anomalies/) and be somewhat more robust to temporary 'spikes' in the data that tend to always be happening somewhere in your metrics but often are not the most important type of anomaly (this is all covered in a lot more detail in the [deepdive tutorial](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb)).
+- You can see how long model training is taking by looking in the logs for the collector `grep 'anomalies' /var/log/netdata/error.log | grep 'training'` and you should see lines like `2020-12-01 22:02:14: python.d INFO: anomalies[local] : training complete in 2.81 seconds (runs_counter=2700, model=pca, train_n_secs=14400, models=26, n_fit_success=26, n_fit_fails=0, after=1606845731, before=1606860131).`.
+ - This also gives counts of the number of models, if any, that failed to fit and so had to default back to the DefaultModel (which is currently [HBOS](https://pyod.readthedocs.io/en/latest/_modules/pyod/models/hbos.html)).
+ - `after` and `before` here refer to the start and end of the training data used to train the models.
+- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the typical performance characteristics we saw from running this collector (with defaults) were:
+ - A runtime (`netdata.runtime_anomalies`) of ~80ms when doing scoring and ~3 seconds when training or retraining the models.
+ - Typically ~3%-3.5% additional cpu usage from scoring, jumping to ~60% for a couple of seconds during model training.
+ - About ~150mb of ram (`apps.mem`) being continually used by the `python.d.plugin`.
+- If you activate this collector on a fresh node, it might take a little while to build up enough data to calculate a realistic and useful model.
+- Some models like `iforest` can be comparatively expensive (on same n1-standard-2 system above ~2s runtime during predict, ~40s training time, ~50% cpu on both train and predict) so if you would like to use it you might be advised to set a relatively high `update_every` maybe 10, 15 or 30 in `anomalies.conf`.
+- Setting a higher `train_every_n` and `update_every` is an easy way to devote less resources on the node to anomaly detection. Specifying less charts and a lower `train_n_secs` will also help reduce resources at the expense of covering less charts and maybe a more noisy model if you set `train_n_secs` to be too small for how your node tends to behave.
+- If you would like to enable this on a Raspberry Pi, then check out [this guide](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/raspberry-pi-anomaly-detection.md) which will guide you through first installing LLVM.
+
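+The sketch below is a rough pandas illustration of the `lags_n`, `smooth_n` and `diffs_n` preprocessing described in the notes above, using the default values (`diffs_n=1`, `smooth_n=3`, `lags_n=5`). It shows the idea rather than the collector's exact implementation, and the column names are made up:
+
+```python
+# Rough illustration of the diffs -> smoothing -> lags preprocessing; not the collector's exact code.
+import numpy as np
+import pandas as pd
+
+diffs_n, smooth_n, lags_n = 1, 3, 5
+
+# stand-in for raw per-second values of one chart's dimensions (made-up column names)
+df = pd.DataFrame(np.random.randn(100, 3), columns=['user', 'system', 'iowait'])
+
+df = df.diff(diffs_n)             # work on differences rather than raw values
+df = df.rolling(smooth_n).mean()  # smooth each dimension
+df = pd.concat([df.shift(n) for n in range(lags_n + 1)], axis=1).dropna()  # append lagged copies
+
+# each row is now a 'feature vector' of recent smoothed, differenced values for the chart
+X = df.values
+print(X.shape)  # (rows, 3 * (lags_n + 1)) = (rows, 18)
+```
+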
+## Useful links and further reading
+
+- [PyOD documentation](https://pyod.readthedocs.io/en/latest/), [PyOD Github](https://github.com/yzhao062/pyod).
+- [Anomaly Detection](https://en.wikipedia.org/wiki/Anomaly_detection) wikipedia page.
+- [Anomaly Detection YouTube playlist](https://www.youtube.com/playlist?list=PL6Zhl9mK2r0KxA6rB87oi4kWzoqGd5vp0) maintained by [andrewm4894](https://github.com/andrewm4894/) from Netdata.
+- [awesome-TS-anomaly-detection](https://github.com/rob-med/awesome-TS-anomaly-detection) Github list of useful tools, libraries and resources.
+- [Mendeley public group](https://www.mendeley.com/community/interesting-anomaly-detection-papers/) with some interesting anomaly detection papers we have been reading.
+- Good [blog post](https://www.anodot.com/blog/what-is-anomaly-detection/) from Anodot on time series anomaly detection. Anodot also have some great whitepapers in this space too that some may find useful.
+- Novelty and outlier detection in the [scikit-learn documentation](https://scikit-learn.org/stable/modules/outlier_detection.html).
+
diff --git a/collectors/python.d.plugin/anomalies/anomalies.chart.py b/collectors/python.d.plugin/anomalies/anomalies.chart.py
new file mode 100644
index 00000000..24e84cc1
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/anomalies.chart.py
@@ -0,0 +1,425 @@
+# -*- coding: utf-8 -*-
+# Description: anomalies netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import sys
+import time
+from datetime import datetime
+import re
+import warnings
+
+import requests
+import numpy as np
+import pandas as pd
+from netdata_pandas.data import get_data, get_allmetrics_async
+from pyod.models.hbos import HBOS
+from pyod.models.pca import PCA
+from pyod.models.loda import LODA
+from pyod.models.iforest import IForest
+from pyod.models.cblof import CBLOF
+from pyod.models.feature_bagging import FeatureBagging
+from pyod.models.copod import COPOD
+from sklearn.preprocessing import MinMaxScaler
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# ignore some sklearn/numpy warnings that are ok
+warnings.filterwarnings('ignore', r'All-NaN slice encountered')
+warnings.filterwarnings('ignore', r'invalid value encountered in true_divide')
+warnings.filterwarnings('ignore', r'divide by zero encountered in true_divide')
+warnings.filterwarnings('ignore', r'invalid value encountered in subtract')
+
+disabled_by_default = True
+
+ORDER = ['probability', 'anomaly']
+
+CHARTS = {
+ 'probability': {
+ 'options': ['probability', 'Anomaly Probability', 'probability', 'anomalies', 'anomalies.probability', 'line'],
+ 'lines': []
+ },
+ 'anomaly': {
+ 'options': ['anomaly', 'Anomaly', 'count', 'anomalies', 'anomalies.anomaly', 'stacked'],
+ 'lines': []
+ },
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.basic_init()
+ self.charts_init()
+ self.custom_models_init()
+ self.data_init()
+ self.model_params_init()
+ self.models_init()
+ self.collected_dims = {'probability': set(), 'anomaly': set()}
+
+ def check(self):
+ if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):
+ self.error("anomalies collector only works with Python>=3.6")
+ if len(self.host_charts_dict[self.host]) > 0:
+ _ = get_allmetrics_async(host_charts_dict=self.host_charts_dict, protocol=self.protocol, user=self.username, pwd=self.password)
+ return True
+
+ def basic_init(self):
+ """Perform some basic initialization.
+ """
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.protocol = self.configuration.get('protocol', 'http')
+ self.host = self.configuration.get('host', '127.0.0.1:19999')
+ self.username = self.configuration.get('username', None)
+ self.password = self.configuration.get('password', None)
+ self.tls_verify = self.configuration.get('tls_verify', True)
+ self.fitted_at = {}
+ self.df_allmetrics = pd.DataFrame()
+ self.last_train_at = 0
+ self.include_average_prob = bool(self.configuration.get('include_average_prob', True))
+ self.reinitialize_at_every_step = bool(self.configuration.get('reinitialize_at_every_step', False))
+
+ def charts_init(self):
+ """Do some initialisation of charts in scope related variables.
+ """
+ self.charts_regex = re.compile(self.configuration.get('charts_regex','None'))
+ self.charts_available = [c for c in list(requests.get(f'{self.protocol}://{self.host}/api/v1/charts', verify=self.tls_verify).json().get('charts', {}).keys())]
+ self.charts_in_scope = list(filter(self.charts_regex.match, self.charts_available))
+ self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
+ if len(self.charts_to_exclude) > 0:
+ self.charts_in_scope = [c for c in self.charts_in_scope if c not in self.charts_to_exclude]
+
+ def custom_models_init(self):
+ """Perform initialization steps related to custom models.
+ """
+ self.custom_models = self.configuration.get('custom_models', None)
+ self.custom_models_normalize = bool(self.configuration.get('custom_models_normalize', False))
+ if self.custom_models:
+ self.custom_models_names = [model['name'] for model in self.custom_models]
+ self.custom_models_dims = [i for s in [model['dimensions'].split(',') for model in self.custom_models] for i in s]
+ self.custom_models_dims = [dim if '::' in dim else f'{self.host}::{dim}' for dim in self.custom_models_dims]
+ self.custom_models_charts = list(set([dim.split('|')[0].split('::')[1] for dim in self.custom_models_dims]))
+ self.custom_models_hosts = list(set([dim.split('::')[0] for dim in self.custom_models_dims]))
+ self.custom_models_host_charts_dict = {}
+ for host in self.custom_models_hosts:
+ self.custom_models_host_charts_dict[host] = list(set([dim.split('::')[1].split('|')[0] for dim in self.custom_models_dims if dim.startswith(host)]))
+ self.custom_models_dims_renamed = [f"{model['name']}|{dim}" for model in self.custom_models for dim in model['dimensions'].split(',')]
+ self.models_in_scope = list(set([f'{self.host}::{c}' for c in self.charts_in_scope] + self.custom_models_names))
+ self.charts_in_scope = list(set(self.charts_in_scope + self.custom_models_charts))
+ self.host_charts_dict = {self.host: self.charts_in_scope}
+ for host in self.custom_models_host_charts_dict:
+ if host not in self.host_charts_dict:
+ self.host_charts_dict[host] = self.custom_models_host_charts_dict[host]
+ else:
+ for chart in self.custom_models_host_charts_dict[host]:
+ if chart not in self.host_charts_dict[host]:
+ self.host_charts_dict[host].append(chart)
+ else:
+ self.models_in_scope = [f'{self.host}::{c}' for c in self.charts_in_scope]
+ self.host_charts_dict = {self.host: self.charts_in_scope}
+ self.model_display_names = {model: model.split('::')[1] if '::' in model else model for model in self.models_in_scope}
+ #self.info(f'self.host_charts_dict (len={len(self.host_charts_dict[self.host])}): {self.host_charts_dict}')
+
+ def data_init(self):
+ """Initialize some empty data objects.
+ """
+ self.data_probability_latest = {f'{m}_prob': 0 for m in self.charts_in_scope}
+ self.data_anomaly_latest = {f'{m}_anomaly': 0 for m in self.charts_in_scope}
+ self.data_latest = {**self.data_probability_latest, **self.data_anomaly_latest}
+
+ def model_params_init(self):
+ """Model parameters initialisation.
+ """
+ self.train_max_n = self.configuration.get('train_max_n', 100000)
+ self.train_n_secs = self.configuration.get('train_n_secs', 14400)
+ self.offset_n_secs = self.configuration.get('offset_n_secs', 0)
+ self.train_every_n = self.configuration.get('train_every_n', 1800)
+ self.train_no_prediction_n = self.configuration.get('train_no_prediction_n', 10)
+ self.initial_train_data_after = self.configuration.get('initial_train_data_after', 0)
+ self.initial_train_data_before = self.configuration.get('initial_train_data_before', 0)
+ self.contamination = self.configuration.get('contamination', 0.001)
+ self.lags_n = {model: self.configuration.get('lags_n', 5) for model in self.models_in_scope}
+ self.smooth_n = {model: self.configuration.get('smooth_n', 5) for model in self.models_in_scope}
+ self.diffs_n = {model: self.configuration.get('diffs_n', 5) for model in self.models_in_scope}
+
+ def models_init(self):
+ """Models initialisation.
+ """
+ self.model = self.configuration.get('model', 'pca')
+ if self.model == 'pca':
+ self.models = {model: PCA(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'loda':
+ self.models = {model: LODA(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'iforest':
+ self.models = {model: IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'cblof':
+ self.models = {model: CBLOF(n_clusters=3, contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'feature_bagging':
+ self.models = {model: FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'copod':
+ self.models = {model: COPOD(contamination=self.contamination) for model in self.models_in_scope}
+ elif self.model == 'hbos':
+ self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
+ else:
+ self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
+ self.custom_model_scalers = {model: MinMaxScaler() for model in self.models_in_scope}
+
+ def model_init(self, model):
+ """Model initialisation of a single model.
+ """
+ if self.model == 'pca':
+ self.models[model] = PCA(contamination=self.contamination)
+ elif self.model == 'loda':
+ self.models[model] = LODA(contamination=self.contamination)
+ elif self.model == 'iforest':
+ self.models[model] = IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination)
+ elif self.model == 'cblof':
+ self.models[model] = CBLOF(n_clusters=3, contamination=self.contamination)
+ elif self.model == 'feature_bagging':
+ self.models[model] = FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination)
+ elif self.model == 'copod':
+ self.models[model] = COPOD(contamination=self.contamination)
+ elif self.model == 'hbos':
+ self.models[model] = HBOS(contamination=self.contamination)
+ else:
+ self.models[model] = HBOS(contamination=self.contamination)
+ self.custom_model_scalers[model] = MinMaxScaler()
+
+ def reinitialize(self):
+ """Reinitialize charts, models and data to a beginning state.
+ """
+ self.charts_init()
+ self.custom_models_init()
+ self.data_init()
+ self.model_params_init()
+ self.models_init()
+
+ def save_data_latest(self, data, data_probability, data_anomaly):
+ """Save the most recent data objects to be used if needed in the future.
+ """
+ self.data_latest = data
+ self.data_probability_latest = data_probability
+ self.data_anomaly_latest = data_anomaly
+
+ def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
+ """If dimension not in chart then add it.
+ """
+ for dim in data:
+ if dim not in self.collected_dims[chart]:
+ self.collected_dims[chart].add(dim)
+ self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
+
+ for dim in list(self.collected_dims[chart]):
+ if dim not in data:
+ self.collected_dims[chart].remove(dim)
+ self.charts[chart].del_dimension(dim, hide=False)
+
+ def add_custom_models_dims(self, df):
+ """Given a df, select columns used by custom models, add custom model name as prefix, and append to df.
+
+ :param df <pd.DataFrame>: dataframe to append new renamed columns to.
+ :return: <pd.DataFrame> dataframe with additional columns added relating to the specified custom models.
+ """
+ df_custom = df[self.custom_models_dims].copy()
+ df_custom.columns = self.custom_models_dims_renamed
+ df = df.join(df_custom)
+
+ return df
+
+ def make_features(self, arr, train=False, model=None):
+ """Take in numpy array and preprocess accordingly by taking diffs, smoothing and adding lags.
+
+ :param arr <np.ndarray>: numpy array we want to make features from.
+ :param train <bool>: True if making features for training, in which case need to fit_transform scaler and maybe sample train_max_n.
+ :param model <str>: model to make features for.
+ :return: <np.ndarray> transformed numpy array.
+ """
+
+ def lag(arr, n):
+ res = np.empty_like(arr)
+ res[:n] = np.nan
+ res[n:] = arr[:-n]
+
+ return res
+
+ arr = np.nan_to_num(arr)
+
+ diffs_n = self.diffs_n[model]
+ smooth_n = self.smooth_n[model]
+ lags_n = self.lags_n[model]
+
+ if self.custom_models_normalize and model in self.custom_models_names:
+ if train:
+ arr = self.custom_model_scalers[model].fit_transform(arr)
+ else:
+ arr = self.custom_model_scalers[model].transform(arr)
+
+ if diffs_n > 0:
+ arr = np.diff(arr, diffs_n, axis=0)
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
+ if smooth_n > 1:
+ arr = np.cumsum(arr, axis=0, dtype=float)
+ arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
+ arr = arr[smooth_n - 1:] / smooth_n
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
+ if lags_n > 0:
+ arr_orig = np.copy(arr)
+ for lag_n in range(1, lags_n + 1):
+ arr = np.concatenate((arr, lag(arr_orig, lag_n)), axis=1)
+ arr = arr[~np.isnan(arr).any(axis=1)]
+
+ if train:
+ if len(arr) > self.train_max_n:
+ arr = arr[np.random.randint(arr.shape[0], size=self.train_max_n), :]
+
+ arr = np.nan_to_num(arr)
+
+ return arr
+
+ def train(self, models_to_train=None, train_data_after=0, train_data_before=0):
+ """Pull required training data and train a model for each specified model.
+
+ :param models_to_train <list>: list of models to train on.
+ :param train_data_after <int>: integer timestamp for start of train data.
+ :param train_data_before <int>: integer timestamp for end of train data.
+ """
+ now = datetime.now().timestamp()
+ if train_data_after > 0 and train_data_before > 0:
+ before = train_data_before
+ after = train_data_after
+ else:
+ before = int(now) - self.offset_n_secs
+ after = before - self.train_n_secs
+
+ # get training data
+ df_train = get_data(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', after=after, before=before,
+ sort_cols=True, numeric_only=True, protocol=self.protocol, float_size='float32', user=self.username, pwd=self.password,
+ verify=self.tls_verify
+ ).ffill()
+ if self.custom_models:
+ df_train = self.add_custom_models_dims(df_train)
+
+ # train model
+ self.try_fit(df_train, models_to_train=models_to_train)
+ self.info(f'training complete in {round(time.time() - now, 2)} seconds (runs_counter={self.runs_counter}, model={self.model}, train_n_secs={self.train_n_secs}, models={len(self.fitted_at)}, n_fit_success={self.n_fit_success}, n_fit_fails={self.n_fit_fail}, after={after}, before={before}).')
+ self.last_train_at = self.runs_counter
+
+ def try_fit(self, df_train, models_to_train=None):
+ """Try fit each model and try to fallback to a default model if fit fails for any reason.
+
+ :param df_train <pd.DataFrame>: data to train on.
+ :param models_to_train <list>: list of models to train.
+ """
+ if models_to_train is None:
+ models_to_train = list(self.models.keys())
+ self.n_fit_fail, self.n_fit_success = 0, 0
+ for model in models_to_train:
+ if model not in self.models:
+ self.model_init(model)
+ X_train = self.make_features(
+ df_train[df_train.columns[df_train.columns.str.startswith(f'{model}|')]].values,
+ train=True, model=model)
+ try:
+ self.models[model].fit(X_train)
+ self.n_fit_success += 1
+ except Exception as e:
+ self.n_fit_fail += 1
+ self.info(e)
+ self.info(f'training failed for {model} at run_counter {self.runs_counter}, defaulting to hbos model.')
+ self.models[model] = HBOS(contamination=self.contamination)
+ self.models[model].fit(X_train)
+ self.fitted_at[model] = self.runs_counter
+
+ def predict(self):
+ """Get latest data, make it into a feature vector, and get predictions for each available model.
+
+ :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
+ """
+ # get recent data to predict on
+ df_allmetrics = get_allmetrics_async(
+ host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True,
+ protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password
+ )
+ if self.custom_models:
+ df_allmetrics = self.add_custom_models_dims(df_allmetrics)
+ self.df_allmetrics = self.df_allmetrics.append(df_allmetrics).ffill().tail((max(self.lags_n.values()) + max(self.smooth_n.values()) + max(self.diffs_n.values())) * 2)
+
+ # get predictions
+ data_probability, data_anomaly = self.try_predict()
+
+ return data_probability, data_anomaly
+
+ def try_predict(self):
+ """Try make prediction and fall back to last known prediction if fails.
+
+ :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
+ """
+ data_probability, data_anomaly = {}, {}
+ for model in self.fitted_at.keys():
+ model_display_name = self.model_display_names[model]
+ try:
+ X_model = np.nan_to_num(
+ self.make_features(
+ self.df_allmetrics[self.df_allmetrics.columns[self.df_allmetrics.columns.str.startswith(f'{model}|')]].values,
+ model=model
+ )[-1,:].reshape(1, -1)
+ )
+ data_probability[model_display_name + '_prob'] = np.nan_to_num(self.models[model].predict_proba(X_model)[-1][1]) * 10000
+ data_anomaly[model_display_name + '_anomaly'] = self.models[model].predict(X_model)[-1]
+ except Exception as _:
+ #self.info(e)
+ if model_display_name + '_prob' in self.data_latest:
+ #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, using last prediction instead.')
+ data_probability[model_display_name + '_prob'] = self.data_latest[model_display_name + '_prob']
+ data_anomaly[model_display_name + '_anomaly'] = self.data_latest[model_display_name + '_anomaly']
+ else:
+ #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, skipping as no previous prediction.')
+ continue
+
+ return data_probability, data_anomaly
+
+ def get_data(self):
+
+ # initialize to what's available right now
+ if self.reinitialize_at_every_step or len(self.host_charts_dict[self.host]) == 0:
+ self.charts_init()
+ self.custom_models_init()
+ self.model_params_init()
+
+ # if not all models have been trained then train those we need to
+ if len(self.fitted_at) < len(self.models_in_scope):
+ self.train(
+ models_to_train=[m for m in self.models_in_scope if m not in self.fitted_at],
+ train_data_after=self.initial_train_data_after,
+ train_data_before=self.initial_train_data_before
+ )
+ # retrain all models as per schedule from config
+ elif self.train_every_n > 0 and self.runs_counter % self.train_every_n == 0:
+ self.reinitialize()
+ self.train()
+
+ # roll forward previous predictions around a training step to avoid the possibility of having the training itself trigger an anomaly
+ if (self.runs_counter - self.last_train_at) <= self.train_no_prediction_n:
+ data_probability = self.data_probability_latest
+ data_anomaly = self.data_anomaly_latest
+ else:
+ data_probability, data_anomaly = self.predict()
+ if self.include_average_prob:
+ average_prob = np.mean(list(data_probability.values()))
+ data_probability['average_prob'] = 0 if np.isnan(average_prob) else average_prob
+
+ data = {**data_probability, **data_anomaly}
+
+ self.validate_charts('probability', data_probability, divisor=100)
+ self.validate_charts('anomaly', data_anomaly)
+
+ self.save_data_latest(data, data_probability, data_anomaly)
+
+ #self.info(f'len(data)={len(data)}')
+ #self.info(f'data')
+
+ return data
diff --git a/collectors/python.d.plugin/anomalies/anomalies.conf b/collectors/python.d.plugin/anomalies/anomalies.conf
new file mode 100644
index 00000000..ef867709
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/anomalies.conf
@@ -0,0 +1,184 @@
+# netdata python.d.plugin configuration for anomalies
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 2
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+# Pull data from local Netdata node.
+anomalies:
+ name: 'Anomalies'
+
+ # Host to pull data from.
+ host: '127.0.0.1:19999'
+
+ # Username and Password for Netdata if using basic auth.
+ # username: '???'
+ # password: '???'
+
+ # Use http or https to pull data
+ protocol: 'http'
+
+ # SSL verify parameter for requests.get() calls
+ tls_verify: true
+
+ # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if you would like to exclude some specific charts.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: 'system.uptime,system.entropy'
+
+ # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
+ # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
+ model: 'pca'
+
+ # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
+ train_max_n: 100000
+
+ # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes).
+ # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
+ train_every_n: 1800
+
+ # The length of the window of data to train on (14400 = last 4 hours).
+ train_n_secs: 14400
+
+ # How many prediction steps after a train event to just use previous prediction value for.
+ # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
+ train_no_prediction_n: 10
+
+ # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
+ # Start of training data for initial model.
+ # initial_train_data_after: 1604578857
+
+ # End of training data for initial model.
+ # initial_train_data_before: 1604593257
+
+ # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
+ offset_n_secs: 0
+
+ # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
+ lags_n: 5
+
+ # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
+ smooth_n: 3
+
+ # How many differences to take in preprocessing your data.
+ # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
+ # diffs_n=0 would mean training models on the raw values of each dimension.
+ # diffs_n=1 means everything is done in terms of differences.
+ diffs_n: 1
+
+ # What is the typical proportion of anomalies in your data on average?
+ # This parameter can control the sensitivity of your models to anomalies.
+ # Some discussion here: https://github.com/yzhao062/pyod/issues/144
+ contamination: 0.001
+
+ # Set to true to include an "average_prob" dimension on the anomalies probability chart, which is
+ # just the average of all anomaly probabilities at each time step.
+ include_average_prob: true
+
+ # Define any custom models you would like to create anomaly probabilities for; some examples below show how.
+ # For example, the config below creates two custom models: one to run anomaly detection on user and system cpu for our demo servers,
+ # and one on the cpu and mem apps metrics for the python.d.plugin.
+ # custom_models:
+ # - name: 'demos_cpu'
+ # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+ # - name: 'apps_python_d_plugin'
+ # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
+
+ # Set to true to normalize, using min-max standardization, features used for the custom models.
+ # Useful if your custom models contain dimensions on very different scales and the model you use does
+ # not internally do its own normalization. Usually best to leave as false.
+ # custom_models_normalize: false
+
+# Standalone Custom models example as an additional collector job.
+# custom:
+# name: 'custom'
+# host: '127.0.0.1:19999'
+# protocol: 'http'
+# charts_regex: 'None'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
+# custom_models:
+# - name: 'user_netdata'
+# dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
+# - name: 'apps_python_d_plugin'
+# dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
+
+# Pull data from some demo nodes for cross node custom models.
+# demos:
+# name: 'demos'
+# host: '127.0.0.1:19999'
+# protocol: 'http'
+# charts_regex: 'None'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
+# custom_models:
+# - name: 'system.cpu'
+# dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
+# - name: 'system.ip'
+# dimensions: 'london.my-netdata.io::system.ip|received,london.my-netdata.io::system.ip|sent,newyork.my-netdata.io::system.ip|received,newyork.my-netdata.io::system.ip|sent'
+# - name: 'system.net'
+# dimensions: 'london.my-netdata.io::system.net|received,london.my-netdata.io::system.net|sent,newyork.my-netdata.io::system.net|received,newyork.my-netdata.io::system.net|sent'
+# - name: 'system.io'
+# dimensions: 'london.my-netdata.io::system.io|in,london.my-netdata.io::system.io|out,newyork.my-netdata.io::system.io|in,newyork.my-netdata.io::system.io|out'
+
+# Example additional job if you want to also pull data from a child streaming to your
+# local parent or even a remote node so long as the Netdata REST API is accessible.
+# mychildnode1:
+# name: 'mychildnode1'
+# host: '127.0.0.1:19999/host/mychildnode1'
+# protocol: 'http'
+# charts_regex: 'system\..*'
+# charts_to_exclude: 'None'
+# model: 'pca'
+# train_max_n: 100000
+# train_every_n: 1800
+# train_n_secs: 14400
+# offset_n_secs: 0
+# lags_n: 5
+# smooth_n: 3
+# diffs_n: 1
+# contamination: 0.001
diff --git a/collectors/python.d.plugin/anomalies/metadata.yaml b/collectors/python.d.plugin/anomalies/metadata.yaml
new file mode 100644
index 00000000..d138cf5d
--- /dev/null
+++ b/collectors/python.d.plugin/anomalies/metadata.yaml
@@ -0,0 +1,87 @@
+# NOTE: this file is commented out as users are recommended to use the
+# native anomaly detection capabilities on the agent instead.
+# meta:
+# plugin_name: python.d.plugin
+# module_name: anomalies
+# monitored_instance:
+# name: python.d anomalies
+# link: ""
+# categories: []
+# icon_filename: ""
+# related_resources:
+# integrations:
+# list: []
+# info_provided_to_referring_integrations:
+# description: ""
+# keywords: []
+# most_popular: false
+# overview:
+# data_collection:
+# metrics_description: ""
+# method_description: ""
+# supported_platforms:
+# include: []
+# exclude: []
+# multi_instance: true
+# additional_permissions:
+# description: ""
+# default_behavior:
+# auto_detection:
+# description: ""
+# limits:
+# description: ""
+# performance_impact:
+# description: ""
+# setup:
+# prerequisites:
+# list: []
+# configuration:
+# file:
+# name: ""
+# description: ""
+# options:
+# description: ""
+# folding:
+# title: ""
+# enabled: true
+# list: []
+# examples:
+# folding:
+# enabled: true
+# title: ""
+# list: []
+# troubleshooting:
+# problems:
+# list: []
+# alerts:
+# - name: anomalies_anomaly_probabilities
+# link: https://github.com/netdata/netdata/blob/master/health/health.d/anomalies.conf
+# metric: anomalies.probability
+# info: average anomaly probability over the last 2 minutes
+# - name: anomalies_anomaly_flags
+# link: https://github.com/netdata/netdata/blob/master/health/health.d/anomalies.conf
+# metric: anomalies.anomaly
+# info: number of anomalies in the last 2 minutes
+# metrics:
+# folding:
+# title: Metrics
+# enabled: false
+# description: ""
+# availability: []
+# scopes:
+# - name: global
+# description: ""
+# labels: []
+# metrics:
+# - name: anomalies.probability
+# description: Anomaly Probability
+# unit: "probability"
+# chart_type: line
+# dimensions:
+# - name: a dimension per probability
+# - name: anomalies.anomaly
+# description: Anomaly
+# unit: "count"
+# chart_type: stacked
+# dimensions:
+# - name: a dimension per anomaly
diff --git a/collectors/python.d.plugin/beanstalk/Makefile.inc b/collectors/python.d.plugin/beanstalk/Makefile.inc
new file mode 100644
index 00000000..4bbb7087
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += beanstalk/beanstalk.chart.py
+dist_pythonconfig_DATA += beanstalk/beanstalk.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += beanstalk/README.md beanstalk/Makefile.inc
+
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
new file mode 120000
index 00000000..4efe1388
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -0,0 +1 @@
+integrations/beanstalk.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
new file mode 100644
index 00000000..396543e5
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+# Description: beanstalk netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import beanstalkc
+
+ BEANSTALKC = True
+except ImportError:
+ BEANSTALKC = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.loaders import load_yaml
+
+ORDER = [
+ 'cpu_usage',
+ 'jobs_rate',
+ 'connections_rate',
+ 'commands_rate',
+ 'current_tubes',
+ 'current_jobs',
+ 'current_connections',
+ 'binlog',
+ 'uptime',
+]
+
+CHARTS = {
+ 'cpu_usage': {
+ 'options': [None, 'Cpu Usage', 'cpu time', 'server statistics', 'beanstalk.cpu_usage', 'area'],
+ 'lines': [
+ ['rusage-utime', 'user', 'incremental'],
+ ['rusage-stime', 'system', 'incremental']
+ ]
+ },
+ 'jobs_rate': {
+ 'options': [None, 'Jobs Rate', 'jobs/s', 'server statistics', 'beanstalk.jobs_rate', 'line'],
+ 'lines': [
+ ['total-jobs', 'total', 'incremental'],
+ ['job-timeouts', 'timeouts', 'incremental']
+ ]
+ },
+ 'connections_rate': {
+ 'options': [None, 'Connections Rate', 'connections/s', 'server statistics', 'beanstalk.connections_rate',
+ 'area'],
+ 'lines': [
+ ['total-connections', 'connections', 'incremental']
+ ]
+ },
+ 'commands_rate': {
+ 'options': [None, 'Commands Rate', 'commands/s', 'server statistics', 'beanstalk.commands_rate', 'stacked'],
+ 'lines': [
+ ['cmd-put', 'put', 'incremental'],
+ ['cmd-peek', 'peek', 'incremental'],
+ ['cmd-peek-ready', 'peek-ready', 'incremental'],
+ ['cmd-peek-delayed', 'peek-delayed', 'incremental'],
+ ['cmd-peek-buried', 'peek-buried', 'incremental'],
+ ['cmd-reserve', 'reserve', 'incremental'],
+ ['cmd-use', 'use', 'incremental'],
+ ['cmd-watch', 'watch', 'incremental'],
+ ['cmd-ignore', 'ignore', 'incremental'],
+ ['cmd-delete', 'delete', 'incremental'],
+ ['cmd-release', 'release', 'incremental'],
+ ['cmd-bury', 'bury', 'incremental'],
+ ['cmd-kick', 'kick', 'incremental'],
+ ['cmd-stats', 'stats', 'incremental'],
+ ['cmd-stats-job', 'stats-job', 'incremental'],
+ ['cmd-stats-tube', 'stats-tube', 'incremental'],
+ ['cmd-list-tubes', 'list-tubes', 'incremental'],
+ ['cmd-list-tube-used', 'list-tube-used', 'incremental'],
+ ['cmd-list-tubes-watched', 'list-tubes-watched', 'incremental'],
+ ['cmd-pause-tube', 'pause-tube', 'incremental']
+ ]
+ },
+ 'current_tubes': {
+ 'options': [None, 'Current Tubes', 'tubes', 'server statistics', 'beanstalk.current_tubes', 'area'],
+ 'lines': [
+ ['current-tubes', 'tubes']
+ ]
+ },
+ 'current_jobs': {
+ 'options': [None, 'Current Jobs', 'jobs', 'server statistics', 'beanstalk.current_jobs', 'stacked'],
+ 'lines': [
+ ['current-jobs-urgent', 'urgent'],
+ ['current-jobs-ready', 'ready'],
+ ['current-jobs-reserved', 'reserved'],
+ ['current-jobs-delayed', 'delayed'],
+ ['current-jobs-buried', 'buried']
+ ]
+ },
+ 'current_connections': {
+ 'options': [None, 'Current Connections', 'connections', 'server statistics',
+ 'beanstalk.current_connections', 'line'],
+ 'lines': [
+ ['current-connections', 'written'],
+ ['current-producers', 'producers'],
+ ['current-workers', 'workers'],
+ ['current-waiting', 'waiting']
+ ]
+ },
+ 'binlog': {
+ 'options': [None, 'Binlog', 'records/s', 'server statistics', 'beanstalk.binlog', 'line'],
+ 'lines': [
+ ['binlog-records-written', 'written', 'incremental'],
+ ['binlog-records-migrated', 'migrated', 'incremental']
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'],
+ 'lines': [
+ ['uptime'],
+ ]
+ }
+}
+
+
+def tube_chart_template(name):
+ order = [
+ '{0}_jobs_rate'.format(name),
+ '{0}_jobs'.format(name),
+ '{0}_connections'.format(name),
+ '{0}_commands'.format(name),
+ '{0}_pause'.format(name)
+ ]
+ family = 'tube {0}'.format(name)
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'],
+ 'lines': [
+ ['_'.join([name, 'total-jobs']), 'jobs', 'incremental']
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'current-jobs-urgent']), 'urgent'],
+ ['_'.join([name, 'current-jobs-ready']), 'ready'],
+ ['_'.join([name, 'current-jobs-reserved']), 'reserved'],
+ ['_'.join([name, 'current-jobs-delayed']), 'delayed'],
+ ['_'.join([name, 'current-jobs-buried']), 'buried']
+ ]
+ },
+ order[2]: {
+ 'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'current-using']), 'using'],
+ ['_'.join([name, 'current-waiting']), 'waiting'],
+ ['_'.join([name, 'current-watching']), 'watching']
+ ]
+ },
+ order[3]: {
+ 'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'],
+ ['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental']
+ ]
+ },
+ order[4]: {
+ 'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'],
+ 'lines': [
+ ['_'.join([name, 'pause']), 'since'],
+ ['_'.join([name, 'pause-time-left']), 'left']
+ ]
+ }
+ }
+
+ return order, charts
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.configuration = configuration
+ self.order = list(ORDER)
+ self.definitions = dict(CHARTS)
+ self.conn = None
+ self.alive = True
+
+ def check(self):
+ if not BEANSTALKC:
+ self.error("'beanstalkc' module is needed to use beanstalk.chart.py")
+ return False
+
+ self.conn = self.connect()
+
+ return True if self.conn else False
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ if not self.is_alive():
+ return None
+
+ active_charts = self.charts.active_charts()
+ data = dict()
+
+ try:
+ data.update(self.conn.stats())
+
+ for tube in self.conn.tubes():
+ stats = self.conn.stats_tube(tube)
+
+ if tube + '_jobs_rate' not in active_charts:
+ self.create_new_tube_charts(tube)
+
+ for stat in stats:
+ data['_'.join([tube, stat])] = stats[stat]
+
+ except beanstalkc.SocketError:
+ self.alive = False
+ return None
+
+ return data or None
+
+ def create_new_tube_charts(self, tube):
+ order, charts = tube_chart_template(tube)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
+ def connect(self):
+ host = self.configuration.get('host', '127.0.0.1')
+ port = self.configuration.get('port', 11300)
+ timeout = self.configuration.get('timeout', 1)
+ try:
+ return beanstalkc.Connection(host=host,
+ port=port,
+ connect_timeout=timeout,
+ parse_yaml=load_yaml)
+ except beanstalkc.SocketError as error:
+ self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error))
+ return None
+
+ def reconnect(self):
+ try:
+ self.conn.reconnect()
+ self.alive = True
+ return True
+ except beanstalkc.SocketError:
+ return False
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf
new file mode 100644
index 00000000..6d9773a1
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf
@@ -0,0 +1,78 @@
+# netdata python.d.plugin configuration for beanstalk
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# chart_cleanup sets the default chart cleanup interval in iterations.
+# A chart is marked as obsolete if it has not been updated
+# 'chart_cleanup' iterations in a row.
+# When a plugin sends the obsolete flag, the charts are not deleted
+# from netdata immediately.
+# They will be hidden immediately (not offered to dashboard viewer,
+# streamed upstream and archived to external databases) and deleted one hour
+# later (configurable from netdata.conf).
+# chart_cleanup: 10
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations
+#
+# Additionally to the above, beanstalk also supports the following:
+#
+# host: 'host' # Server ip address or hostname. Default: 127.0.0.1
+# port: port # Beanstalkd port. Default: 11300
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md b/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md
new file mode 100644
index 00000000..5095c0c2
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md
@@ -0,0 +1,219 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/beanstalk/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/beanstalk/metadata.yaml"
+sidebar_label: "Beanstalk"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Beanstalk
+
+
+<img src="https://netdata.cloud/img/beanstalk.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: beanstalk
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Beanstalk metrics to enhance job queueing and processing efficiency. Track job rates, processing times, and queue lengths for better task management.
+
+The collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is given, the module will attempt to connect to beanstalkd at 127.0.0.1:11300.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Beanstalk instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| beanstalk.cpu_usage | user, system | cpu time |
+| beanstalk.jobs_rate | total, timeouts | jobs/s |
+| beanstalk.connections_rate | connections | connections/s |
+| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, use, watch, ignore, delete, release, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |
+| beanstalk.current_tubes | tubes | tubes |
+| beanstalk.current_jobs | urgent, ready, reserved, delayed, buried | jobs |
+| beanstalk.current_connections | written, producers, workers, waiting | connections |
+| beanstalk.binlog | written, migrated | records/s |
+| beanstalk.uptime | uptime | seconds |
+
+### Per tube
+
+Metrics related to Beanstalk tubes. Each tube produces its own set of the following metrics.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| beanstalk.jobs_rate | jobs | jobs/s |
+| beanstalk.jobs | urgent, ready, reserved, delayed, buried | jobs |
+| beanstalk.connections | using, waiting, watching | connections |
+| beanstalk.commands | deletes, pauses | commands/s |
+| beanstalk.pause | since, left | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |
+
+
+## Setup
+
+### Prerequisites
+
+#### beanstalkc python module
+
+The collector requires the `beanstalkc` python module to be installed.
+
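+As a hint, assuming `pip` is available for the Python interpreter that `python.d.plugin` uses, the module can typically be installed like this:
+
+```bash
+# assumes pip targets the same Python that python.d.plugin runs under
+sudo pip install beanstalkc
+```
+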
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/beanstalk.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/beanstalk.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| host | IP or URL to a beanstalk service. | 127.0.0.1 | no |
+| port | Port to the IP or URL to a beanstalk service. | 11300 | no |
+
+</details>
+
+#### Examples
+
+##### Remote beanstalk server
+
+A basic remote beanstalk server
+
+```yaml
+remote:
+ name: 'beanstalk'
+ host: '1.2.3.4'
+ port: 11300
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local_beanstalk'
+ host: '127.0.0.1'
+ port: 11300
+
+remote_job:
+ name: 'remote_beanstalk'
+ host: '192.0.2.1'
+ port: 11300
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `beanstalk` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin beanstalk debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/beanstalk/metadata.yaml b/collectors/python.d.plugin/beanstalk/metadata.yaml
new file mode 100644
index 00000000..7dff9cb3
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/metadata.yaml
@@ -0,0 +1,263 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: beanstalk
+ monitored_instance:
+ name: Beanstalk
+ link: "https://beanstalkd.github.io/"
+ categories:
+ - data-collection.message-brokers
+ #- data-collection.task-queues
+ icon_filename: "beanstalk.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - beanstalk
+ - beanstalkd
+ - message
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor Beanstalk metrics to enhance job queueing and processing efficiency. Track job rates, processing times, and queue lengths for better task management."
+ method_description: "The collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "If no configuration is given, module will attempt to connect to beanstalkd on 127.0.0.1:11300 address."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "beanstalkc python module"
+ description: The collector requires the `beanstalkc` python module to be installed.
+ configuration:
+ file:
+ name: python.d/beanstalk.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: host
+ description: IP or URL to a beanstalk service.
+ default_value: "127.0.0.1"
+ required: false
+ - name: port
+ description: Port to the IP or URL to a beanstalk service.
+ default_value: "11300"
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Remote beanstalk server
+ description: A basic remote beanstalk server
+ folding:
+ enabled: false
+ config: |
+ remote:
+ name: 'beanstalk'
+ host: '1.2.3.4'
+ port: 11300
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ localhost:
+ name: 'local_beanstalk'
+ host: '127.0.0.1'
+ port: 11300
+
+ remote_job:
+ name: 'remote_beanstalk'
+ host: '192.0.2.1'
+ port: 11300
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: beanstalk_server_buried_jobs
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/beanstalkd.conf
+ metric: beanstalk.current_jobs
+ info: number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs.
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: beanstalk.cpu_usage
+ description: Cpu Usage
+ unit: "cpu time"
+ chart_type: area
+ dimensions:
+ - name: user
+ - name: system
+ - name: beanstalk.jobs_rate
+ description: Jobs Rate
+ unit: "jobs/s"
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: timeouts
+ - name: beanstalk.connections_rate
+ description: Connections Rate
+ unit: "connections/s"
+ chart_type: area
+ dimensions:
+ - name: connections
+ - name: beanstalk.commands_rate
+ description: Commands Rate
+ unit: "commands/s"
+ chart_type: stacked
+ dimensions:
+ - name: put
+ - name: peek
+ - name: peek-ready
+ - name: peek-delayed
+ - name: peek-buried
+ - name: reserve
+ - name: use
+ - name: watch
+ - name: ignore
+ - name: delete
+ - name: release
+ - name: bury
+ - name: kick
+ - name: stats
+ - name: stats-job
+ - name: stats-tube
+ - name: list-tubes
+ - name: list-tube-used
+ - name: list-tubes-watched
+ - name: pause-tube
+ - name: beanstalk.current_tubes
+ description: Current Tubes
+ unit: "tubes"
+ chart_type: area
+ dimensions:
+ - name: tubes
+ - name: beanstalk.current_jobs
+ description: Current Jobs
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: urgent
+ - name: ready
+ - name: reserved
+ - name: delayed
+ - name: buried
+ - name: beanstalk.current_connections
+ description: Current Connections
+ unit: "connections"
+ chart_type: line
+ dimensions:
+ - name: written
+ - name: producers
+ - name: workers
+ - name: waiting
+ - name: beanstalk.binlog
+ description: Binlog
+ unit: "records/s"
+ chart_type: line
+ dimensions:
+ - name: written
+ - name: migrated
+ - name: beanstalk.uptime
+ description: Uptime
+ unit: "seconds"
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: tube
+ description: "Metrics related to Beanstalk tubes. Each tube produces its own set of the following metrics."
+ labels: []
+ metrics:
+ - name: beanstalk.jobs_rate
+ description: Jobs Rate
+ unit: "jobs/s"
+ chart_type: area
+ dimensions:
+ - name: jobs
+ - name: beanstalk.jobs
+ description: Jobs
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: urgent
+ - name: ready
+ - name: reserved
+ - name: delayed
+ - name: buried
+ - name: beanstalk.connections
+ description: Connections
+ unit: "connections"
+ chart_type: stacked
+ dimensions:
+ - name: using
+ - name: waiting
+ - name: watching
+ - name: beanstalk.commands
+ description: Commands
+ unit: "commands/s"
+ chart_type: stacked
+ dimensions:
+ - name: deletes
+ - name: pauses
+ - name: beanstalk.pause
+ description: Pause
+ unit: "seconds"
+ chart_type: stacked
+ dimensions:
+ - name: since
+ - name: left
diff --git a/collectors/python.d.plugin/bind_rndc/Makefile.inc b/collectors/python.d.plugin/bind_rndc/Makefile.inc
new file mode 100644
index 00000000..72f39149
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += bind_rndc/bind_rndc.chart.py
+dist_pythonconfig_DATA += bind_rndc/bind_rndc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += bind_rndc/README.md bind_rndc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
new file mode 120000
index 00000000..03a182ae
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -0,0 +1 @@
+integrations/isc_bind_rndc.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
new file mode 100644
index 00000000..9d6c9fec
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+# Description: bind rndc netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+from collections import defaultdict
+from subprocess import Popen
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
+
+update_every = 30
+
+ORDER = [
+ 'name_server_statistics',
+ 'incoming_queries',
+ 'outgoing_queries',
+ 'named_stats_size',
+]
+
+CHARTS = {
+ 'name_server_statistics': {
+ 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics',
+ 'bind_rndc.name_server_statistics', 'line'],
+ 'lines': [
+ ['nms_requests', 'requests', 'incremental'],
+ ['nms_rejected_queries', 'rejected_queries', 'incremental'],
+ ['nms_success', 'success', 'incremental'],
+ ['nms_failure', 'failure', 'incremental'],
+ ['nms_responses', 'responses', 'incremental'],
+ ['nms_duplicate', 'duplicate', 'incremental'],
+ ['nms_recursion', 'recursion', 'incremental'],
+ ['nms_nxrrset', 'nxrrset', 'incremental'],
+ ['nms_nxdomain', 'nxdomain', 'incremental'],
+ ['nms_non_auth_answer', 'non_auth_answer', 'incremental'],
+ ['nms_auth_answer', 'auth_answer', 'incremental'],
+ ['nms_dropped_queries', 'dropped_queries', 'incremental'],
+ ]},
+ 'incoming_queries': {
+ 'options': [None, 'Incoming Queries', 'queries', 'incoming queries', 'bind_rndc.incoming_queries', 'line'],
+ 'lines': [
+ ]},
+ 'outgoing_queries': {
+ 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries', 'bind_rndc.outgoing_queries', 'line'],
+ 'lines': [
+ ]},
+ 'named_stats_size': {
+ 'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
+ 'lines': [
+ ['stats_size', None, 'absolute', 1, 1 << 20]
+ ]
+ }
+}
+
+NMS = {
+ 'nms_requests': [
+ 'IPv4 requests received',
+ 'IPv6 requests received',
+ 'TCP requests received',
+ 'requests with EDNS(0) receive'
+ ],
+ 'nms_responses': [
+ 'responses sent',
+ 'truncated responses sent',
+ 'responses with EDNS(0) sent',
+ 'requests with unsupported EDNS version received'
+ ],
+ 'nms_failure': [
+ 'other query failures',
+ 'queries resulted in SERVFAIL'
+ ],
+ 'nms_auth_answer': ['queries resulted in authoritative answer'],
+ 'nms_non_auth_answer': ['queries resulted in non authoritative answer'],
+ 'nms_nxrrset': ['queries resulted in nxrrset'],
+ 'nms_success': ['queries resulted in successful answer'],
+ 'nms_nxdomain': ['queries resulted in NXDOMAIN'],
+ 'nms_recursion': ['queries caused recursion'],
+ 'nms_duplicate': ['duplicate queries received'],
+ 'nms_rejected_queries': [
+ 'auth queries rejected',
+ 'recursive queries rejected'
+ ],
+ 'nms_dropped_queries': ['queries dropped']
+}
+
+STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
+ self.rndc = find_binary('rndc')
+ self.data = dict(
+ nms_requests=0,
+ nms_responses=0,
+ nms_failure=0,
+ nms_auth_answer=0,
+ nms_non_auth_answer=0,
+ nms_nxrrset=0,
+ nms_success=0,
+ nms_nxdomain=0,
+ nms_recursion=0,
+ nms_duplicate=0,
+ nms_rejected_queries=0,
+ nms_dropped_queries=0,
+ )
+
+ def check(self):
+ if not self.rndc:
+ self.error('Can\'t locate "rndc" binary or binary is not executable by netdata')
+ return False
+
+ if not (os.path.isfile(self.named_stats_path) and os.access(self.named_stats_path, os.R_OK)):
+ self.error('Cannot access file %s' % self.named_stats_path)
+ return False
+
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if not run_rndc.returncode:
+ return True
+ self.error('Not enough permissions to run "%s stats"' % self.rndc)
+ return False
+
+ def _get_raw_data(self):
+ """
+ Run 'rndc stats' and read last dump from named.stats
+ :return: dict
+ """
+ result = dict()
+ try:
+ current_size = os.path.getsize(self.named_stats_path)
+ run_rndc = Popen([self.rndc, 'stats'], shell=False)
+ run_rndc.wait()
+
+ if run_rndc.returncode:
+ return None
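+ # named.stats is append-only: 'rndc stats' appends a new dump, so seek past the pre-dump size and parse only the freshly written section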
+ with open(self.named_stats_path) as named_stats:
+ named_stats.seek(current_size)
+ result['stats'] = named_stats.readlines()
+ result['size'] = current_size
+ return result
+ except (OSError, IOError):
+ return None
+
+ def _get_data(self):
+ """
+ Parse data from _get_raw_data()
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+
+ if raw_data is None:
+ return None
+ parsed = dict()
+ for stat in STATS:
+ parsed[stat] = parse_stats(field=stat,
+ named_stats=raw_data['stats'])
+
+ self.data.update(nms_mapper(data=parsed['Name Server Statistics']))
+
+ for elem in zip(['Incoming Queries', 'Outgoing Queries'], ['incoming_queries', 'outgoing_queries']):
+ parsed_key, chart_name = elem[0], elem[1]
+ for dimension_id, value in queries_mapper(data=parsed[parsed_key],
+ add=chart_name[:9]).items():
+
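+ # query types appear dynamically in named.stats; register a new chart dimension the first time one is seen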
+ if dimension_id not in self.data:
+ dimension = dimension_id.replace(chart_name[:9], '')
+ if dimension_id not in self.charts[chart_name]:
+ self.charts[chart_name].add_dimension([dimension_id, dimension, 'incremental'])
+
+ self.data[dimension_id] = value
+
+ self.data['stats_size'] = raw_data['size']
+ return self.data
+
+
+def parse_stats(field, named_stats):
+ """
+ :param field: str:
+ :param named_stats: list:
+ :return: dict
+
+ Example:
+ field: 'Incoming Queries'
+ named_stats (list of lines):
+ ++ Incoming Requests ++
+ 1405660 QUERY
+ 3 NOTIFY
+ ++ Incoming Queries ++
+ 1214961 A
+ 75 NS
+ 2 CNAME
+ 2897 SOA
+ 35544 PTR
+ 14 MX
+ 5822 TXT
+ 145974 AAAA
+ 371 SRV
+ ++ Outgoing Queries ++
+ ...
+
+ result:
+ {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...}
+ """
+ data = dict()
+ ns = iter(named_stats)
+ for line in ns:
+ if field not in line:
+ continue
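+ # inside the requested '++ <field> ++' section: accumulate 'value name' pairs until the next section header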
+ while True:
+ try:
+ line = next(ns)
+ except StopIteration:
+ break
+ if '++' not in line:
+ if '[' in line:
+ continue
+ v, k = line.strip().split(' ', 1)
+ if k not in data:
+ data[k] = 0
+ data[k] += int(v)
+ continue
+ break
+ break
+ return data
+
+
+def nms_mapper(data):
+ """
+ :param data: dict
+ :return: dict(defaultdict)
+ """
+ result = defaultdict(int)
+ for k, v in NMS.items():
+ for elem in v:
+ result[k] += data.get(elem, 0)
+ return result
+
+
+def queries_mapper(data, add):
+ """
+ :param data: dict
+ :param add: str
+ :return: dict
+ """
+ return dict([(add + k, v) for k, v in data.items()])
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
new file mode 100644
index 00000000..84eaf059
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
@@ -0,0 +1,108 @@
+# netdata python.d.plugin configuration for bind_rndc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, bind_rndc also supports the following:
+#
+# named_stats_path: 'path to named.stats' # Default: '/var/log/bind/named.stats'
+#------------------------------------------------------------------------------------------------------------------
+# Important Information
+#
+# BIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set update_every below 30 sec.
+# It is STRONGLY RECOMMENDED to create a bind-rndc.conf file for logrotate.
+#
+# To set up your BIND to dump stats do the following:
+#
+# 1. Add the following to the options {} block in 'named.conf.options':
+# statistics-file "/var/log/bind/named.stats";
+#
+# 2. Create bind/ directory in /var/log
+# cd /var/log/ && mkdir bind
+#
+# 3. Change owner of directory to 'bind' user
+# chown bind bind/
+#
+# 4. RELOAD (NOT restart) BIND
+# systemctl reload bind9.service
+#
+# 5. Run 'rndc stats' as root to dump the statistics (BIND will create named.stats in the new directory)
+#
+# To allow Netdata to run 'rndc stats' change '/etc/bind/rndc.key' group to netdata
+# chown :netdata rndc.key
+#
+# Last, BUT NOT least, is to create bind-rndc.conf in logrotate.d/:
+#
+# /var/log/bind/named.stats {
+#
+# daily
+# rotate 4
+# compress
+# delaycompress
+# create 0644 bind bind
+# missingok
+# postrotate
+# rndc reload > /dev/null
+# endscript
+# }
+#
+# To test your logrotate conf file run as root:
+# logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md b/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md
new file mode 100644
index 00000000..163f8282
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md
@@ -0,0 +1,215 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/bind_rndc/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/bind_rndc/metadata.yaml"
+sidebar_label: "ISC Bind (RNDC)"
+learn_status: "Published"
+learn_rel_path: "Data Collection/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ISC Bind (RNDC)
+
+
+<img src="https://netdata.cloud/img/isc.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: bind_rndc
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor ISC Bind (RNDC) performance for optimal DNS server operations. Monitor query rates, response times, and error rates to ensure reliable DNS service delivery.
+
+This collector uses the `rndc` tool to dump statistics to `named.stats`, then reads that file to gather BIND name server summary performance metrics.
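+
+As a rough sketch of that flow (a standalone illustration only; the stats path and the presence of `rndc` in `PATH` are assumptions, not values taken from the module):
+
+```python
+import os
+import subprocess
+
+STATS_PATH = '/var/log/bind/named.stats'  # assumed default dump location
+
+# Record the current file size, ask BIND to append a fresh dump, then read only the new part.
+offset = os.path.getsize(STATS_PATH)
+subprocess.check_call(['rndc', 'stats'])
+with open(STATS_PATH) as stats:
+    stats.seek(offset)
+    print(''.join(stats.readlines()))
+```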
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is given, the collector will attempt to read the named.stats file at `/var/log/bind/named.stats`
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ISC Bind (RNDC) instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| bind_rndc.name_server_statistics | requests, rejected_queries, success, failure, responses, duplicate, recursion, nxrrset, nxdomain, non_auth_answer, auth_answer, dropped_queries | stats |
+| bind_rndc.incoming_queries | a dimension per incoming query type | queries |
+| bind_rndc.outgoing_queries | a dimension per outgoing query type | queries |
+| bind_rndc.stats_size | stats_size | MiB |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ bind_rndc_stats_file_size ](https://github.com/netdata/netdata/blob/master/health/health.d/bind_rndc.conf) | bind_rndc.stats_size | BIND statistics-file size |
+
+
+## Setup
+
+### Prerequisites
+
+#### Minimum bind version and permissions
+
+The BIND version must be >= 9.6 and the Netdata user must have permission to run `rndc stats`.
+
+#### Setup log rotate for bind stats
+
+BIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set `update_every` below 30 sec.
+It is STRONGLY RECOMMENDED to create a `bind-rndc.conf` file for logrotate.
+
+To set up BIND to dump stats do the following:
+
+1. Add the following to the `options {}` block in 'named.conf.options':
+`statistics-file "/var/log/bind/named.stats";`
+
+2. Create bind/ directory in /var/log:
+`cd /var/log/ && mkdir bind`
+
+3. Change owner of directory to 'bind' user:
+`chown bind bind/`
+
+4. RELOAD (NOT restart) BIND:
+`systemctl reload bind9.service`
+
+5. Run `rndc stats` as root to dump the statistics (BIND will create named.stats in the new directory)
+
+To allow Netdata to run 'rndc stats' change '/etc/bind/rndc.key' group to netdata:
+`chown :netdata rndc.key`
+
+Last, BUT NOT least, is to create bind-rndc.conf in logrotate.d/:
+```
+/var/log/bind/named.stats {
+
+ daily
+ rotate 4
+ compress
+ delaycompress
+ create 0644 bind bind
+ missingok
+ postrotate
+ rndc reload > /dev/null
+ endscript
+}
+```
+To test your logrotate conf file run as root:
+`logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)`
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/bind_rndc.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/bind_rndc.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| named_stats_path | Path to the named stats file, after being dumped by `rndc`. | /var/log/bind/named.stats | no |
+
+</details>
+
+#### Examples
+
+##### Local bind stats
+
+Define a local path to bind stats file
+
+```yaml
+local:
+ named_stats_path: '/var/log/bind/named.stats'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `bind_rndc` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin bind_rndc debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/bind_rndc/metadata.yaml b/collectors/python.d.plugin/bind_rndc/metadata.yaml
new file mode 100644
index 00000000..e3568e44
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/metadata.yaml
@@ -0,0 +1,191 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: bind_rndc
+ monitored_instance:
+ name: ISC Bind (RNDC)
+ link: "https://www.isc.org/bind/"
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ icon_filename: "isc.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - dns
+ - bind
+ - server
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor ISC Bind (RNDC) performance for optimal DNS server operations. Monitor query rates, response times, and error rates to ensure reliable DNS service delivery."
+ method_description: "This collector uses the `rndc` tool to dump statistics to `named.stats`, then reads that file to gather BIND name server summary performance metrics."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "If no configuration is given, the collector will attempt to read named.stats file at `/var/log/bind/named.stats`"
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "Minimum bind version and permissions"
+ description: "Version of bind must be >=9.6 and the Netdata user must have permissions to run `rndc stats`"
+ - title: "Setup log rotate for bind stats"
+ description: |
+ BIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set `update_every` below 30 sec.
+ It is STRONGLY RECOMMENDED to create a `bind-rndc.conf` file for logrotate.
+
+ To set up BIND to dump stats do the following:
+
+ 1. Add the following to the `options {}` block in 'named.conf.options':
+ `statistics-file "/var/log/bind/named.stats";`
+
+ 2. Create bind/ directory in /var/log:
+ `cd /var/log/ && mkdir bind`
+
+ 3. Change owner of directory to 'bind' user:
+ `chown bind bind/`
+
+ 4. RELOAD (NOT restart) BIND:
+ `systemctl reload bind9.service`
+
+ 5. Run `rndc stats` as root to dump the statistics (BIND will create named.stats in the new directory)
+
+ To allow Netdata to run 'rndc stats' change '/etc/bind/rndc.key' group to netdata:
+ `chown :netdata rndc.key`
+
+ Last, BUT NOT least, is to create bind-rndc.conf in logrotate.d/:
+ ```
+ /var/log/bind/named.stats {
+
+ daily
+ rotate 4
+ compress
+ delaycompress
+ create 0644 bind bind
+ missingok
+ postrotate
+ rndc reload > /dev/null
+ endscript
+ }
+ ```
+ To test your logrotate conf file run as root:
+ `logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)`
+ configuration:
+ file:
+ name: python.d/bind_rndc.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: named_stats_path
+ description: Path to the named stats file, after being dumped by `rndc`.
+ default_value: "/var/log/bind/named.stats"
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Local bind stats
+ description: Define a local path to bind stats file
+ config: |
+ local:
+ named_stats_path: '/var/log/bind/named.stats'
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: bind_rndc_stats_file_size
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/bind_rndc.conf
+ metric: bind_rndc.stats_size
+ info: BIND statistics-file size
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: bind_rndc.name_server_statistics
+ description: Name Server Statistics
+ unit: "stats"
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: rejected_queries
+ - name: success
+ - name: failure
+ - name: responses
+ - name: duplicate
+ - name: recursion
+ - name: nxrrset
+ - name: nxdomain
+ - name: non_auth_answer
+ - name: auth_answer
+ - name: dropped_queries
+ - name: bind_rndc.incoming_queries
+ description: Incoming queries
+ unit: "queries"
+ chart_type: line
+ dimensions:
+ - name: a dimension per incoming query type
+ - name: bind_rndc.outgoing_queries
+ description: Outgoing queries
+ unit: "queries"
+ chart_type: line
+ dimensions:
+ - name: a dimension per outgoing query type
+ - name: bind_rndc.stats_size
+ description: Named Stats File Size
+ unit: "MiB"
+ chart_type: line
+ dimensions:
+ - name: stats_size
diff --git a/collectors/python.d.plugin/boinc/Makefile.inc b/collectors/python.d.plugin/boinc/Makefile.inc
new file mode 100644
index 00000000..319e19cf
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += boinc/boinc.chart.py
+dist_pythonconfig_DATA += boinc/boinc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += boinc/README.md boinc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
new file mode 120000
index 00000000..22c10ca1
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -0,0 +1 @@
+integrations/boinc.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
new file mode 100644
index 00000000..a31eda1c
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
@@ -0,0 +1,168 @@
+# -*- coding: utf-8 -*-
+# Description: BOINC netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import socket
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from third_party import boinc_client
+
+ORDER = [
+ 'tasks',
+ 'states',
+ 'sched_states',
+ 'process_states',
+]
+
+CHARTS = {
+ 'tasks': {
+ 'options': [None, 'Overall Tasks', 'tasks', 'boinc', 'boinc.tasks', 'line'],
+ 'lines': [
+ ['total', 'Total', 'absolute', 1, 1],
+ ['active', 'Active', 'absolute', 1, 1]
+ ]
+ },
+ 'states': {
+ 'options': [None, 'Tasks per State', 'tasks', 'boinc', 'boinc.states', 'line'],
+ 'lines': [
+ ['new', 'New', 'absolute', 1, 1],
+ ['downloading', 'Downloading', 'absolute', 1, 1],
+ ['downloaded', 'Ready to Run', 'absolute', 1, 1],
+ ['comperror', 'Compute Errors', 'absolute', 1, 1],
+ ['uploading', 'Uploading', 'absolute', 1, 1],
+ ['uploaded', 'Uploaded', 'absolute', 1, 1],
+ ['aborted', 'Aborted', 'absolute', 1, 1],
+ ['upload_failed', 'Failed Uploads', 'absolute', 1, 1]
+ ]
+ },
+ 'sched_states': {
+ 'options': [None, 'Tasks per Scheduler State', 'tasks', 'boinc', 'boinc.sched', 'line'],
+ 'lines': [
+ ['uninit_sched', 'Uninitialized', 'absolute', 1, 1],
+ ['preempted', 'Preempted', 'absolute', 1, 1],
+ ['scheduled', 'Scheduled', 'absolute', 1, 1]
+ ]
+ },
+ 'process_states': {
+ 'options': [None, 'Tasks per Process State', 'tasks', 'boinc', 'boinc.process', 'line'],
+ 'lines': [
+ ['uninit_proc', 'Uninitialized', 'absolute', 1, 1],
+ ['executing', 'Executing', 'absolute', 1, 1],
+ ['suspended', 'Suspended', 'absolute', 1, 1],
+ ['aborting', 'Aborted', 'absolute', 1, 1],
+ ['quit', 'Quit', 'absolute', 1, 1],
+ ['copy_pending', 'Copy Pending', 'absolute', 1, 1]
+ ]
+ }
+}
+
+# A simple template used for pre-loading the return dictionary to make
+# the _get_data() method simpler.
+_DATA_TEMPLATE = {
+ 'total': 0,
+ 'active': 0,
+ 'new': 0,
+ 'downloading': 0,
+ 'downloaded': 0,
+ 'comperror': 0,
+ 'uploading': 0,
+ 'uploaded': 0,
+ 'aborted': 0,
+ 'upload_failed': 0,
+ 'uninit_sched': 0,
+ 'preempted': 0,
+ 'scheduled': 0,
+ 'uninit_proc': 0,
+ 'executing': 0,
+ 'suspended': 0,
+ 'aborting': 0,
+ 'quit': 0,
+ 'copy_pending': 0
+}
+
+# Map task states to dimensions
+_TASK_MAP = {
+ boinc_client.ResultState.NEW: 'new',
+ boinc_client.ResultState.FILES_DOWNLOADING: 'downloading',
+ boinc_client.ResultState.FILES_DOWNLOADED: 'downloaded',
+ boinc_client.ResultState.COMPUTE_ERROR: 'comperror',
+ boinc_client.ResultState.FILES_UPLOADING: 'uploading',
+ boinc_client.ResultState.FILES_UPLOADED: 'uploaded',
+ boinc_client.ResultState.ABORTED: 'aborted',
+ boinc_client.ResultState.UPLOAD_FAILED: 'upload_failed'
+}
+
+# Map scheduler states to dimensions
+_SCHED_MAP = {
+ boinc_client.CpuSched.UNINITIALIZED: 'uninit_sched',
+ boinc_client.CpuSched.PREEMPTED: 'preempted',
+ boinc_client.CpuSched.SCHEDULED: 'scheduled',
+}
+
+# Maps process states to dimensions
+_PROC_MAP = {
+ boinc_client.Process.UNINITIALIZED: 'uninit_proc',
+ boinc_client.Process.EXECUTING: 'executing',
+ boinc_client.Process.SUSPENDED: 'suspended',
+ boinc_client.Process.ABORT_PENDING: 'aborting',
+ boinc_client.Process.QUIT_PENDING: 'quit',
+ boinc_client.Process.COPY_PENDING: 'copy_pending'
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 0)
+ self.password = self.configuration.get('password', '')
+ self.client = boinc_client.BoincClient(host=self.host, port=self.port, passwd=self.password)
+ self.alive = False
+
+ def check(self):
+ return self.connect()
+
+ def connect(self):
+ self.client.connect()
+ self.alive = self.client.connected and self.client.authorized
+ return self.alive
+
+ def reconnect(self):
+ # The client class itself actually disconnects existing
+ # connections when it is told to connect, so we don't need to
+ # explicitly disconnect when we're just trying to reconnect.
+ return self.connect()
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+
+ data = dict(_DATA_TEMPLATE)
+
+ try:
+ results = self.client.get_tasks()
+ except socket.error:
+ self.error('Connection is dead')
+ self.alive = False
+ return None
+
+ for task in results:
+ data['total'] += 1
+ data[_TASK_MAP[task.state]] += 1
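+ # scheduler and process state attributes only exist on currently active tasks; tasks without them are skipped via the AttributeError handler below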
+ try:
+ if task.active_task:
+ data['active'] += 1
+ data[_SCHED_MAP[task.scheduler_state]] += 1
+ data[_PROC_MAP[task.active_task_state]] += 1
+ except AttributeError:
+ pass
+
+ return data or None
diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
new file mode 100644
index 00000000..16edf55c
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/boinc.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for boinc
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, boinc also supports the following:
+#
+# hostname: localhost # The host running the BOINC client
+# port: 31416 # The remote GUI RPC port for BOINC
+# password: '' # The remote GUI RPC password
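+#
+# Example job (hypothetical values; adjust host, port and password for your client):
+#
+# remote:
+#     hostname: '10.0.0.2'
+#     port: 31416
+#     password: 'gui-rpc-password'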
diff --git a/collectors/python.d.plugin/boinc/integrations/boinc.md b/collectors/python.d.plugin/boinc/integrations/boinc.md
new file mode 100644
index 00000000..d6874d45
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/integrations/boinc.md
@@ -0,0 +1,204 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/boinc/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/boinc/metadata.yaml"
+sidebar_label: "BOINC"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Distributed Computing Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# BOINC
+
+
+<img src="https://netdata.cloud/img/bolt.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: boinc
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.
+
+It uses the same RPC interface that the BOINC monitoring GUI does.
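+
+A minimal sketch of what that looks like, assuming netdata's bundled `third_party.boinc_client` helper is importable and using placeholder connection details (31416 is the usual GUI RPC port):
+
+```python
+from third_party import boinc_client  # RPC helper shipped with netdata's python.d plugin
+
+# Placeholder host/port/password; the real values come from the job configuration.
+client = boinc_client.BoincClient(host='localhost', port=31416, passwd='gui-rpc-password')
+client.connect()
+if client.connected and client.authorized:
+    tasks = client.get_tasks()
+    print('total tasks:', len(tasks))
+    print('active tasks:', sum(1 for t in tasks if getattr(t, 'active_task', False)))
+```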
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per BOINC instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| boinc.tasks | Total, Active | tasks |
+| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |
+| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |
+| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |
+| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |
+| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |
+| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+#### Boinc RPC interface
+
+BOINC requires the use of a password to access its RPC interface. You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/boinc.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/boinc.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| hostname | Define the hostname where BOINC is running. | localhost | no |
+| port | The port of the BOINC RPC interface. | | no |
+| password | Provide a password to connect to the BOINC RPC interface. | | no |
+
+</details>
+
+#### Examples
+
+##### Configuration of a remote boinc instance
+
+A basic JOB configuration for a remote boinc instance
+
+```yaml
+remote:
+ hostname: '1.2.3.4'
+ port: 1234
+ password: 'some-password'
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 1234
+ password: 'some-password'
+
+remote_job:
+ name: 'remote'
+ host: '192.0.2.1'
+ port: 1234
+ password: some-other-password
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin boinc debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/boinc/metadata.yaml b/collectors/python.d.plugin/boinc/metadata.yaml
new file mode 100644
index 00000000..33a67ac3
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/metadata.yaml
@@ -0,0 +1,198 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: boinc
+ monitored_instance:
+ name: BOINC
+ link: "https://boinc.berkeley.edu/"
+ categories:
+ - data-collection.distributed-computing-systems
+ icon_filename: "bolt.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - boinc
+ - distributed
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client."
+ method_description: "It uses the same RPC interface that the BOINC monitoring GUI does."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "By default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for this file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "Boinc RPC interface"
+ description: BOINC requires the use of a password to access its RPC interface. You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
+ configuration:
+ file:
+ name: python.d/boinc.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: hostname
+ description: Define the hostname where BOINC is running.
+ default_value: "localhost"
+ required: false
+ - name: port
+ description: The port of the BOINC RPC interface.
+ default_value: ""
+ required: false
+ - name: password
+ description: Provide a password to connect to the BOINC RPC interface.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Configuration of a remote boinc instance
+ description: A basic JOB configuration for a remote boinc instance
+ folding:
+ enabled: false
+ config: |
+ remote:
+ hostname: '1.2.3.4'
+ port: 1234
+ password: 'some-password'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 1234
+ password: 'some-password'
+
+ remote_job:
+ name: 'remote'
+ host: '192.0.2.1'
+ port: 1234
+ password: some-other-password
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: boinc_total_tasks
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf
+ metric: boinc.tasks
+ info: average number of total tasks over the last 10 minutes
+ os: "*"
+ - name: boinc_active_tasks
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf
+ metric: boinc.tasks
+ info: average number of active tasks over the last 10 minutes
+ os: "*"
+ - name: boinc_compute_errors
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf
+ metric: boinc.states
+ info: average number of compute errors over the last 10 minutes
+ os: "*"
+ - name: boinc_upload_errors
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf
+ metric: boinc.states
+ info: average number of failed uploads over the last 10 minutes
+ os: "*"
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: boinc.tasks
+ description: Overall Tasks
+ unit: "tasks"
+ chart_type: line
+ dimensions:
+ - name: Total
+ - name: Active
+ - name: boinc.states
+ description: Tasks per State
+ unit: "tasks"
+ chart_type: line
+ dimensions:
+ - name: New
+ - name: Downloading
+ - name: Ready to Run
+ - name: Compute Errors
+ - name: Uploading
+ - name: Uploaded
+ - name: Aborted
+ - name: Failed Uploads
+ - name: boinc.sched
+ description: Tasks per Scheduler State
+ unit: "tasks"
+ chart_type: line
+ dimensions:
+ - name: Uninitialized
+ - name: Preempted
+ - name: Scheduled
+ - name: boinc.process
+ description: Tasks per Process State
+ unit: "tasks"
+ chart_type: line
+ dimensions:
+ - name: Uninitialized
+ - name: Executing
+ - name: Suspended
+ - name: Aborted
+ - name: Quit
+ - name: Copy Pending
diff --git a/collectors/python.d.plugin/ceph/Makefile.inc b/collectors/python.d.plugin/ceph/Makefile.inc
new file mode 100644
index 00000000..15b039ef
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ceph/ceph.chart.py
+dist_pythonconfig_DATA += ceph/ceph.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ceph/README.md ceph/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
new file mode 120000
index 00000000..654248b7
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -0,0 +1 @@
+integrations/ceph.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
new file mode 100644
index 00000000..4bcbe197
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
@@ -0,0 +1,374 @@
+# -*- coding: utf-8 -*-
+# Description: ceph netdata python.d module
+# Author: Luis Eduardo (lets00)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import rados
+
+ CEPH = True
+except ImportError:
+ CEPH = False
+
+import json
+import os
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 10
+
+ORDER = [
+ 'general_usage',
+ 'general_objects',
+ 'general_bytes',
+ 'general_operations',
+ 'general_latency',
+ 'pool_usage',
+ 'pool_objects',
+ 'pool_read_bytes',
+ 'pool_write_bytes',
+ 'pool_read_operations',
+ 'pool_write_operations',
+ 'osd_usage',
+ 'osd_size',
+ 'osd_apply_latency',
+ 'osd_commit_latency'
+]
+
+CHARTS = {
+ 'general_usage': {
+ 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
+ 'lines': [
+ ['general_available', 'avail', 'absolute'],
+ ['general_usage', 'used', 'absolute']
+ ]
+ },
+ 'general_objects': {
+ 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'],
+ 'lines': [
+ ['general_objects', 'cluster', 'absolute']
+ ]
+ },
+ 'general_bytes': {
+ 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
+ 'area'],
+ 'lines': [
+ ['general_read_bytes', 'read', 'absolute', 1, 1024],
+ ['general_write_bytes', 'write', 'absolute', -1, 1024]
+ ]
+ },
+ 'general_operations': {
+ 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations',
+ 'area'],
+ 'lines': [
+ ['general_read_operations', 'read', 'absolute', 1],
+ ['general_write_operations', 'write', 'absolute', -1]
+ ]
+ },
+ 'general_latency': {
+ 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency',
+ 'area'],
+ 'lines': [
+ ['general_apply_latency', 'apply', 'absolute'],
+ ['general_commit_latency', 'commit', 'absolute']
+ ]
+ },
+ 'pool_usage': {
+ 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
+ 'lines': []
+ },
+ 'pool_objects': {
+ 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'],
+ 'lines': []
+ },
+ 'pool_read_bytes': {
+ 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
+ 'lines': []
+ },
+ 'pool_write_bytes': {
+ 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
+ 'lines': []
+ },
+ 'pool_read_operations': {
+ 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'],
+ 'lines': []
+ },
+ 'pool_write_operations': {
+ 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 'area'],
+ 'lines': []
+ },
+ 'osd_usage': {
+ 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
+ 'lines': []
+ },
+ 'osd_size': {
+ 'options': [None, 'Ceph OSDs size', 'KiB', 'osd', 'ceph.osd_size', 'line'],
+ 'lines': []
+ },
+ 'osd_apply_latency': {
+ 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'],
+ 'lines': []
+ },
+ 'osd_commit_latency': {
+ 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'],
+ 'lines': []
+ }
+
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.config_file = self.configuration.get('config_file')
+ self.keyring_file = self.configuration.get('keyring_file')
+ self.rados_id = self.configuration.get('rados_id', 'admin')
+
+ def check(self):
+ """
+ Checks module
+ :return:
+ """
+ if not CEPH:
+ self.error('rados module is needed to use ceph.chart.py')
+ return False
+ if not (self.config_file and self.keyring_file):
+ self.error('config_file and/or keyring_file is not defined')
+ return False
+
+ # Verify files and permissions
+ if not (os.access(self.config_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.F_OK)):
+ self.error('{0} does not exist'.format(self.keyring_file))
+ return False
+ if not (os.access(self.config_file, os.R_OK)):
+ self.error('Ceph plugin cannot read {0}, check read permissions.'.format(self.config_file))
+ return False
+ if not (os.access(self.keyring_file, os.R_OK)):
+ self.error('Ceph plugin cannot read {0}, check read permissions.'.format(self.keyring_file))
+ return False
+ try:
+ self.cluster = rados.Rados(conffile=self.config_file,
+ conf=dict(keyring=self.keyring_file),
+ rados_id=self.rados_id)
+ self.cluster.connect()
+ except rados.Error as error:
+ self.error(error)
+ return False
+ self.create_definitions()
+ return True
+
+ def create_definitions(self):
+ """
+ Create dynamically charts options
+ :return: None
+ """
+ # Pool lines
+ for pool in sorted(self._get_df()['pools'], key=lambda x: sorted(x.keys())):
+ self.definitions['pool_usage']['lines'].append([pool['name'],
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']),
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
+ self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute', 1, 1024])
+ self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
+ self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
+ pool['name'],
+ 'absolute'])
+
+ # OSD lines
+ for osd in sorted(self._get_osd_df()['nodes'], key=lambda x: sorted(x.keys())):
+ self.definitions['osd_usage']['lines'].append([osd['name'],
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_size']['lines'].append(['size_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+ self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']),
+ osd['name'],
+ 'absolute'])
+
+ def get_data(self):
+ """
+ Catch all ceph data
+ :return: dict
+ """
+ try:
+ data = {}
+ df = self._get_df()
+ osd_df = self._get_osd_df()
+ osd_perf = self._get_osd_perf()
+ osd_perf_infos = get_osd_perf_infos(osd_perf)
+ pool_stats = self._get_osd_pool_stats()
+
+ data.update(self._get_general(osd_perf_infos, pool_stats))
+ for pool in df['pools']:
+ data.update(self._get_pool_usage(pool))
+ data.update(self._get_pool_objects(pool))
+ for pool_io in pool_stats:
+ data.update(self._get_pool_rw(pool_io))
+ for osd in osd_df['nodes']:
+ data.update(self._get_osd_usage(osd))
+ data.update(self._get_osd_size(osd))
+ for osd_apply_commit in osd_perf_infos:
+ data.update(self._get_osd_latency(osd_apply_commit))
+ return data
+ except (ValueError, AttributeError) as error:
+ self.error(error)
+ return None
+
+ def _get_general(self, osd_perf_infos, pool_stats):
+ """
+ Get ceph's general usage
+ :return: dict
+ """
+ status = self.cluster.get_cluster_stats()
+ read_bytes_sec = 0
+ write_bytes_sec = 0
+ read_op_per_sec = 0
+ write_op_per_sec = 0
+ apply_latency = 0
+ commit_latency = 0
+
+ for pool_rw_io_b in pool_stats:
+ read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0)
+ write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0)
+ read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0)
+ write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0)
+ for perf in osd_perf_infos:
+ apply_latency += perf['perf_stats']['apply_latency_ms']
+ commit_latency += perf['perf_stats']['commit_latency_ms']
+
+ return {
+ 'general_usage': int(status['kb_used']),
+ 'general_available': int(status['kb_avail']),
+ 'general_objects': int(status['num_objects']),
+ 'general_read_bytes': read_bytes_sec,
+ 'general_write_bytes': write_bytes_sec,
+ 'general_read_operations': read_op_per_sec,
+ 'general_write_operations': write_op_per_sec,
+ 'general_apply_latency': apply_latency,
+ 'general_commit_latency': commit_latency
+ }
+
+ @staticmethod
+ def _get_pool_usage(pool):
+ """
+ Process raw data into pool usage dict information
+ :return: A pool dict with pool name's key and usage bytes' value
+ """
+ return {pool['name']: pool['stats']['kb_used']}
+
+ @staticmethod
+ def _get_pool_objects(pool):
+ """
+ Process raw data into pool usage dict information
+ :return: A pool dict with pool name's key and object numbers
+ """
+ return {'obj_{0}'.format(pool['name']): pool['stats']['objects']}
+
+ @staticmethod
+ def _get_pool_rw(pool):
+ """
+ Get read/write kb and operations in a pool
+ :return: A pool dict with both read/write bytes and operations.
+ """
+ return {
+ 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
+ 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
+ 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
+ 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
+ }
+
+ @staticmethod
+ def _get_osd_usage(osd):
+ """
+ Process raw data into osd dict information to get osd usage
+ :return: A osd dict with osd name's key and usage bytes' value
+ """
+ return {osd['name']: float(osd['kb_used'])}
+
+ @staticmethod
+ def _get_osd_size(osd):
+ """
+ Process raw data into osd dict information to get osd size (kb)
+ :return: A osd dict with osd name's key and size bytes' value
+ """
+ return {'size_{0}'.format(osd['name']): float(osd['kb'])}
+
+ @staticmethod
+ def _get_osd_latency(osd):
+ """
+ Get ceph osd apply and commit latency
+ :return: A osd dict with osd name's key with both apply and commit latency values
+ """
+ return {
+ 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
+ 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']
+ }
+
+ def _get_df(self):
+ """
+ Get ceph df output
+ :return: ceph df --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'df',
+ 'format': 'json'
+ }), b'')[1].decode('utf-8'))
+
+ def _get_osd_df(self):
+ """
+ Get ceph osd df output
+ :return: ceph osd df --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd df',
+ 'format': 'json'
+ }), b'')[1].decode('utf-8').replace('-nan', '"-nan"'))
+
+ def _get_osd_perf(self):
+ """
+ Get ceph osd performance
+ :return: ceph osd perf --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd perf',
+ 'format': 'json'
+ }), b'')[1].decode('utf-8'))
+
+ def _get_osd_pool_stats(self):
+ """
+ Get ceph osd pool status.
+ This command is used to get information about both
+ read/write operation and bytes per second on each pool
+ :return: ceph osd pool stats --format json
+ """
+ return json.loads(self.cluster.mon_command(json.dumps({
+ 'prefix': 'osd pool stats',
+ 'format': 'json'
+ }), b'')[1].decode('utf-8'))
+
+
+def get_osd_perf_infos(osd_perf):
+ # https://github.com/netdata/netdata/issues/8247
+# the module uses 'osd_perf_infos' data; it has been moved under 'osdstats' since Ceph v14.2
+ if 'osd_perf_infos' in osd_perf:
+ return osd_perf['osd_perf_infos']
+ return osd_perf['osdstats']['osd_perf_infos']
diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
new file mode 100644
index 00000000..81788e86
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/ceph.conf
@@ -0,0 +1,75 @@
+# netdata python.d.plugin configuration for ceph stats
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 10 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, ceph plugin also supports the following:
+#
+# config_file: 'config_file' # Ceph config file.
+# keyring_file: 'keyring_file' # Ceph keyring file. netdata user must be added into ceph group
+# # and the keyring file must have group read permission.
+# rados_id: 'rados username' # ID used to connect to ceph cluster. Allows
+# # creating a read-only key for pulling data instead of the admin key
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+config_file: '/etc/ceph/ceph.conf'
+keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+rados_id: 'admin'
diff --git a/collectors/python.d.plugin/ceph/integrations/ceph.md b/collectors/python.d.plugin/ceph/integrations/ceph.md
new file mode 100644
index 00000000..cfda01fb
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/integrations/ceph.md
@@ -0,0 +1,194 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ceph/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ceph/metadata.yaml"
+sidebar_label: "Ceph"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Ceph
+
+
+<img src="https://netdata.cloud/img/ceph.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: ceph
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.
+
+Uses the `rados` python module to connect to a Ceph cluster.
+
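+As a rough illustration (not part of the collector itself), the sketch below issues the same kind of `rados` calls the module relies on; the config file, keyring and `rados_id` are simply the defaults from `ceph.conf` shown further down and may need adjusting for your cluster.
+
+```python
+import json
+import rados
+
+# Connect with the same defaults the collector ships with.
+cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
+                      conf=dict(keyring='/etc/ceph/ceph.client.admin.keyring'),
+                      rados_id='admin')
+cluster.connect()
+try:
+    # The same monitor command the collector issues for the cluster usage charts.
+    ret, outbuf, outs = cluster.mon_command(json.dumps({'prefix': 'df', 'format': 'json'}), b'')
+    print(sorted(json.loads(outbuf.decode('utf-8')).keys()))
+finally:
+    cluster.shutdown()
+```
+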
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Ceph instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ceph.general_usage | avail, used | KiB |
+| ceph.general_objects | cluster | objects |
+| ceph.general_bytes | read, write | KiB/s |
+| ceph.general_operations | read, write | operations |
+| ceph.general_latency | apply, commit | milliseconds |
+| ceph.pool_usage | a dimension per Ceph Pool | KiB |
+| ceph.pool_objects | a dimension per Ceph Pool | objects |
+| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |
+| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |
+| ceph.pool_read_operations | a dimension per Ceph Pool | operations |
+| ceph.pool_write_operations | a dimension per Ceph Pool | operations |
+| ceph.osd_usage | a dimension per Ceph OSD | KiB |
+| ceph.osd_size | a dimension per Ceph OSD | KiB |
+| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |
+| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |
+
+
+## Setup
+
+### Prerequisites
+
+#### `rados` python module
+
+Make sure the `rados` python module is installed
+
+#### Granting read permissions to ceph group from keyring file
+
+Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`
+
+#### Create a specific rados_id
+
+You can optionally create a rados_id to use instead of admin
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/ceph.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/ceph.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| config_file | Ceph config file | | yes |
+| keyring_file | Ceph keyring file. netdata user must be added into the ceph group and the keyring file must have group read permission. | | yes |
+| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |
+
+</details>
+
+#### Examples
+
+##### Basic local Ceph cluster
+
+A basic configuration to connect to a local Ceph cluster.
+
+```yaml
+local:
+ config_file: '/etc/ceph/ceph.conf'
+ keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin ceph debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/ceph/metadata.yaml b/collectors/python.d.plugin/ceph/metadata.yaml
new file mode 100644
index 00000000..0f06470b
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/metadata.yaml
@@ -0,0 +1,223 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: ceph
+ monitored_instance:
+ name: Ceph
+ link: 'https://ceph.io/'
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: 'ceph.svg'
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ''
+ keywords:
+ - ceph
+ - storage
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: 'This collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.'
+ method_description: 'Uses the `rados` python module to connect to a Ceph cluster.'
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ''
+ default_behavior:
+ auto_detection:
+ description: ''
+ limits:
+ description: ''
+ performance_impact:
+ description: ''
+ setup:
+ prerequisites:
+ list:
+ - title: '`rados` python module'
+ description: 'Make sure the `rados` python module is installed'
+ - title: 'Granting read permissions to ceph group from keyring file'
+ description: 'Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`'
+ - title: 'Create a specific rados_id'
+ description: 'You can optionally create a rados_id to use instead of admin'
+ configuration:
+ file:
+ name: python.d/ceph.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ''
+ required: false
+ - name: config_file
+ description: Ceph config file
+ default_value: ''
+ required: true
+ - name: keyring_file
+ description: Ceph keyring file. netdata user must be added into the ceph group and the keyring file must have group read permission.
+ default_value: ''
+ required: true
+ - name: rados_id
+ description: A rados user id to use for connecting to the Ceph cluster.
+ default_value: 'admin'
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic local Ceph cluster
+ description: A basic configuration to connect to a local Ceph cluster.
+ folding:
+ enabled: false
+ config: |
+ local:
+ config_file: '/etc/ceph/ceph.conf'
+ keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: ceph_cluster_space_usage
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/ceph.conf
+ metric: ceph.general_usage
+ info: cluster disk space utilization
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: ceph.general_usage
+ description: Ceph General Space
+ unit: "KiB"
+ chart_type: stacked
+ dimensions:
+ - name: avail
+ - name: used
+ - name: ceph.general_objects
+ description: Ceph General Objects
+ unit: "objects"
+ chart_type: area
+ dimensions:
+ - name: cluster
+ - name: ceph.general_bytes
+ description: Ceph General Read/Write Data/s
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: ceph.general_operations
+ description: Ceph General Read/Write Operations/s
+ unit: "operations"
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: ceph.general_latency
+ description: Ceph General Apply/Commit latency
+ unit: "milliseconds"
+ chart_type: area
+ dimensions:
+ - name: apply
+ - name: commit
+ - name: ceph.pool_usage
+ description: Ceph Pools
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: a dimension per Ceph Pool
+ - name: ceph.pool_objects
+ description: Ceph Pools
+ unit: "objects"
+ chart_type: line
+ dimensions:
+ - name: a dimension per Ceph Pool
+ - name: ceph.pool_read_bytes
+ description: Ceph Read Pool Data/s
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: a dimension per Ceph Pool
+ - name: ceph.pool_write_bytes
+ description: Ceph Write Pool Data/s
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: a dimension per Ceph Pool
+ - name: ceph.pool_read_operations
+ description: Ceph Read Pool Operations/s
+ unit: "operations"
+ chart_type: area
+ dimensions:
+ - name: a dimension per Ceph Pool
+ - name: ceph.pool_write_operations
+ description: Ceph Write Pool Operations/s
+ unit: "operations"
+ chart_type: area
+ dimensions:
+ - name: a dimension per Ceph Pool
+ - name: ceph.osd_usage
+ description: Ceph OSDs
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: a dimension per Ceph OSD
+ - name: ceph.osd_size
+ description: Ceph OSDs size
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: a dimension per Ceph OSD
+ - name: ceph.apply_latency
+ description: Ceph OSDs apply latency
+ unit: "milliseconds"
+ chart_type: line
+ dimensions:
+ - name: a dimension per Ceph OSD
+ - name: ceph.commit_latency
+ description: Ceph OSDs commit latency
+ unit: "milliseconds"
+ chart_type: line
+ dimensions:
+ - name: a dimension per Ceph OSD
diff --git a/collectors/python.d.plugin/changefinder/Makefile.inc b/collectors/python.d.plugin/changefinder/Makefile.inc
new file mode 100644
index 00000000..01a92408
--- /dev/null
+++ b/collectors/python.d.plugin/changefinder/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += changefinder/changefinder.chart.py
+dist_pythonconfig_DATA += changefinder/changefinder.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += changefinder/README.md changefinder/Makefile.inc
+
diff --git a/collectors/python.d.plugin/changefinder/README.md b/collectors/python.d.plugin/changefinder/README.md
new file mode 120000
index 00000000..0ca704eb
--- /dev/null
+++ b/collectors/python.d.plugin/changefinder/README.md
@@ -0,0 +1 @@
+integrations/python.d_changefinder.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/changefinder/changefinder.chart.py b/collectors/python.d.plugin/changefinder/changefinder.chart.py
new file mode 100644
index 00000000..2a69cd9f
--- /dev/null
+++ b/collectors/python.d.plugin/changefinder/changefinder.chart.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+# Description: changefinder netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from json import loads
+import re
+
+from bases.FrameworkServices.UrlService import UrlService
+
+import numpy as np
+import changefinder
+from scipy.stats import percentileofscore
+
+update_every = 5
+disabled_by_default = True
+
+ORDER = [
+ 'scores',
+ 'flags'
+]
+
+CHARTS = {
+ 'scores': {
+ 'options': [None, 'ChangeFinder', 'score', 'Scores', 'changefinder.scores', 'line'],
+ 'lines': []
+ },
+ 'flags': {
+ 'options': [None, 'ChangeFinder', 'flag', 'Flags', 'changefinder.flags', 'stacked'],
+ 'lines': []
+ }
+}
+
+DEFAULT_PROTOCOL = 'http'
+DEFAULT_HOST = '127.0.0.1:19999'
+DEFAULT_CHARTS_REGEX = 'system.*'
+DEFAULT_MODE = 'per_chart'
+DEFAULT_CF_R = 0.5
+DEFAULT_CF_ORDER = 1
+DEFAULT_CF_SMOOTH = 15
+DEFAULT_CF_DIFF = False
+DEFAULT_CF_THRESHOLD = 99
+DEFAULT_N_SCORE_SAMPLES = 14400
+DEFAULT_SHOW_SCORES = False
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.protocol = self.configuration.get('protocol', DEFAULT_PROTOCOL)
+ self.host = self.configuration.get('host', DEFAULT_HOST)
+ self.url = '{}://{}/api/v1/allmetrics?format=json'.format(self.protocol, self.host)
+ self.charts_regex = re.compile(self.configuration.get('charts_regex', DEFAULT_CHARTS_REGEX))
+ self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
+ self.mode = self.configuration.get('mode', DEFAULT_MODE)
+ self.n_score_samples = int(self.configuration.get('n_score_samples', DEFAULT_N_SCORE_SAMPLES))
+ self.show_scores = int(self.configuration.get('show_scores', DEFAULT_SHOW_SCORES))
+ self.cf_r = float(self.configuration.get('cf_r', DEFAULT_CF_R))
+ self.cf_order = int(self.configuration.get('cf_order', DEFAULT_CF_ORDER))
+ self.cf_smooth = int(self.configuration.get('cf_smooth', DEFAULT_CF_SMOOTH))
+ self.cf_diff = bool(self.configuration.get('cf_diff', DEFAULT_CF_DIFF))
+ self.cf_threshold = float(self.configuration.get('cf_threshold', DEFAULT_CF_THRESHOLD))
+ self.collected_dims = {'scores': set(), 'flags': set()}
+ self.models = {}
+ self.x_latest = {}
+ self.scores_latest = {}
+ self.scores_samples = {}
+
+ def get_score(self, x, model):
+ """Update the score for the model based on most recent data, flag if it's percentile passes self.cf_threshold.
+ """
+
+ # get score
+ if model not in self.models:
+ # initialise empty model if needed
+ self.models[model] = changefinder.ChangeFinder(r=self.cf_r, order=self.cf_order, smooth=self.cf_smooth)
+ # if the update for this step fails then just fall back to the last known score
+ try:
+ score = self.models[model].update(x)
+ self.scores_latest[model] = score
+ except Exception as _:
+ score = self.scores_latest.get(model, 0)
+ score = 0 if np.isnan(score) else score
+
+ # update sample scores used to calculate percentiles
+ if model in self.scores_samples:
+ self.scores_samples[model].append(score)
+ else:
+ self.scores_samples[model] = [score]
+ self.scores_samples[model] = self.scores_samples[model][-self.n_score_samples:]
+
+ # convert score to percentile
+ score = percentileofscore(self.scores_samples[model], score)
+
+ # flag based on score percentile
+ flag = 1 if score >= self.cf_threshold else 0
+
+ return score, flag
+
+ def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
+ """If dimension not in chart then add it.
+ """
+ if not self.charts:
+ return
+
+ for dim in data:
+ if dim not in self.collected_dims[chart]:
+ self.collected_dims[chart].add(dim)
+ self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
+
+ for dim in list(self.collected_dims[chart]):
+ if dim not in data:
+ self.collected_dims[chart].remove(dim)
+ self.charts[chart].del_dimension(dim, hide=False)
+
+ def diff(self, x, model):
+ """Take difference of data.
+ """
+ x_diff = x - self.x_latest.get(model, 0)
+ self.x_latest[model] = x
+ x = x_diff
+ return x
+
+ def _get_data(self):
+
+ # pull data from self.url
+ raw_data = self._get_raw_data()
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+
+ # filter to just the data for the charts specified
+ charts_in_scope = list(filter(self.charts_regex.match, raw_data.keys()))
+ charts_in_scope = [c for c in charts_in_scope if c not in self.charts_to_exclude]
+
+ data_score = {}
+ data_flag = {}
+
+ # process each chart
+ for chart in charts_in_scope:
+
+ if self.mode == 'per_chart':
+
+ # average dims on chart and run changefinder on that average
+ x = [raw_data[chart]['dimensions'][dim]['value'] for dim in raw_data[chart]['dimensions']]
+ x = [x for x in x if x is not None]
+
+ if len(x) > 0:
+
+ x = sum(x) / len(x)
+ x = self.diff(x, chart) if self.cf_diff else x
+
+ score, flag = self.get_score(x, chart)
+ if self.show_scores:
+ data_score['{}_score'.format(chart)] = score * 100
+ data_flag[chart] = flag
+
+ else:
+
+ # run changefinder on each individual dim
+ for dim in raw_data[chart]['dimensions']:
+
+ chart_dim = '{}|{}'.format(chart, dim)
+
+ x = raw_data[chart]['dimensions'][dim]['value']
+ x = x if x else 0
+ x = self.diff(x, chart_dim) if self.cf_diff else x
+
+ score, flag = self.get_score(x, chart_dim)
+ if self.show_scores:
+ data_score['{}_score'.format(chart_dim)] = score * 100
+ data_flag[chart_dim] = flag
+
+ self.validate_charts('flags', data_flag)
+
+ if self.show_scores and len(data_score) > 0:
+ data_score['average_score'] = sum(data_score.values()) / len(data_score)
+ self.validate_charts('scores', data_score, divisor=100)
+
+ data = {**data_score, **data_flag}
+
+ return data
diff --git a/collectors/python.d.plugin/changefinder/changefinder.conf b/collectors/python.d.plugin/changefinder/changefinder.conf
new file mode 100644
index 00000000..56a681f1
--- /dev/null
+++ b/collectors/python.d.plugin/changefinder/changefinder.conf
@@ -0,0 +1,74 @@
+# netdata python.d.plugin configuration for changefinder
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+
+local:
+
+ # A friendly name for this job.
+ name: 'local'
+
+ # What host to pull data from.
+ host: '127.0.0.1:19999'
+
+ # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if you would like to exclude some specific charts.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: ''
+
+ # Get ChangeFinder scores 'per_dim' or 'per_chart'.
+ mode: 'per_chart'
+
+ # Default parameters that can be passed to the changefinder library.
+ cf_r: 0.5
+ cf_order: 1
+ cf_smooth: 15
+
+ # The percentile above which scores will be flagged.
+ cf_threshold: 99
+
+ # The number of recent scores to use when calculating the percentile of the changefinder score.
+ n_score_samples: 14400
+
+ # Set to true if you also want to chart the percentile scores in addition to the flags.
+ # Mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time.
+ show_scores: false
diff --git a/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md b/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
new file mode 100644
index 00000000..c338c937
--- /dev/null
+++ b/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
@@ -0,0 +1,217 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/changefinder/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/changefinder/metadata.yaml"
+sidebar_label: "python.d changefinder"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Other"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# python.d changefinder
+
+Plugin: python.d.plugin
+Module: changefinder
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to
+perform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)
+on your Netdata charts and/or dimensions.
+
+
+Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node for example).
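+
+As a rough sketch of the scoring step (it mirrors `get_score()` in the module above and assumes the default `cf_r`, `cf_order`, `cf_smooth`, `cf_threshold` and `n_score_samples` values; the toy input series is made up purely for illustration):
+
+```python
+import changefinder
+from scipy.stats import percentileofscore
+
+cf = changefinder.ChangeFinder(r=0.5, order=1, smooth=15)  # cf_r, cf_order, cf_smooth defaults
+recent_scores = []  # the collector caps this window at n_score_samples (14400 by default)
+
+for x in [1.0, 1.1, 0.9, 1.0, 9.0]:  # toy series with a jump at the end
+    raw = cf.update(x)                              # raw ChangeFinder score for this step
+    recent_scores = (recent_scores + [raw])[-14400:]
+    score = percentileofscore(recent_scores, raw)   # percentile (0-100) over recent scores
+    flag = 1 if score >= 99 else 0                  # flagged when above cf_threshold
+    print(raw, score, flag)
+```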
+
+### Notes
+
+- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its
+  typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly
+  this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw
+  score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have
+  already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then
+  should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning
+  approaches which need some initial window of time before they can be useful.
+- As this collector does most of the work in Python itself, you may want to try it out first on a test or development
+ system to get a sense of its performance characteristics on a node similar to where you would like to use it.
+- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the
+ typical performance characteristics we saw from running this collector (with defaults) were:
+ - A runtime (`netdata.runtime_changefinder`) of ~30ms.
+ - Typically ~1% additional cpu usage.
+ - About ~85mb of ram (`apps.mem`) being continually used by the `python.d.plugin` under default configuration.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default this collector will work over all `system.*` charts.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per python.d changefinder instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| changefinder.scores | a dimension per chart | score |
+| changefinder.flags | a dimension per chart | flag |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Python Requirements
+
+This collector will only work with Python 3 and requires the packages below be installed.
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# install required packages for the netdata user
+pip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4
+```
+
+**Note**: if you need to tell Netdata to use Python 3 then you can pass the below command in the python plugin section
+of your `netdata.conf` file.
+
+```yaml
+[ plugin:python.d ]
+ # update every = 1
+ command options = -ppython3
+```
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/changefinder.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/changefinder.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| charts_regex | what charts to pull data for - A regex like `system\..*\|` or `system\..*\|apps.cpu\|apps.mem` etc. | system\..* | yes |
+| charts_to_exclude | charts to exclude, useful if you would like to exclude some specific charts. note: should be a ',' separated string like 'chart.name,chart.name'. | | no |
+| mode | get ChangeFinder scores 'per_dim' or 'per_chart'. | per_chart | yes |
+| cf_r | default parameters that can be passed to the changefinder library. | 0.5 | no |
+| cf_order | default parameters that can be passed to the changefinder library. | 1 | no |
+| cf_smooth | default parameters that can be passed to the changefinder library. | 15 | no |
+| cf_threshold | the percentile above which scores will be flagged. | 99 | no |
+| n_score_samples | the number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |
+| show_scores | set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) | no | no |
+
+</details>
+
+#### Examples
+
+##### Default
+
+Default configuration.
+
+```yaml
+local:
+ name: 'local'
+ host: '127.0.0.1:19999'
+ charts_regex: 'system\..*'
+ charts_to_exclude: ''
+ mode: 'per_chart'
+ cf_r: 0.5
+ cf_order: 1
+ cf_smooth: 15
+ cf_threshold: 99
+ n_score_samples: 14400
+ show_scores: false
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `changefinder` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin changefinder debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/changefinder/metadata.yaml b/collectors/python.d.plugin/changefinder/metadata.yaml
new file mode 100644
index 00000000..170d9146
--- /dev/null
+++ b/collectors/python.d.plugin/changefinder/metadata.yaml
@@ -0,0 +1,212 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: changefinder
+ monitored_instance:
+ name: python.d changefinder
+ link: ""
+ categories:
+ - data-collection.other
+ icon_filename: ""
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - change detection
+ - anomaly detection
+ - machine learning
+ - ml
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to
+ perform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)
+ on your Netdata charts and/or dimensions.
+ method_description: >
+ Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return a
+ changepoint score for each chart or dimension you configure it to work on. This is
+ an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step
+ to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap
+ to compute at each step of data collection (see the notes section below for more details) and it should scale fairly
+ well to work on lots of charts or hosts (if running on a parent node for example).
+
+ ### Notes
+ - It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its
+ typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly
+ this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw
+ score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have
+ already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then
+ should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning
+ approaches which need some initial window of time before they can be useful.
+ - As this collector does most of the work in Python itself, you may want to try it out first on a test or development
+ system to get a sense of its performance characteristics on a node similar to where you would like to use it.
+ - On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the
+ typical performance characteristics we saw from running this collector (with defaults) were:
+ - A runtime (`netdata.runtime_changefinder`) of ~30ms.
+ - Typically ~1% additional cpu usage.
+ - About ~85mb of ram (`apps.mem`) being continually used by the `python.d.plugin` under default configuration.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "By default this collector will work over all `system.*` charts."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Python Requirements
+ description: |
+ This collector will only work with Python 3 and requires the packages below be installed.
+
+ ```bash
+ # become netdata user
+ sudo su -s /bin/bash netdata
+ # install required packages for the netdata user
+ pip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4
+ ```
+
+ **Note**: if you need to tell Netdata to use Python 3 then you can pass the below command in the python plugin section
+ of your `netdata.conf` file.
+
+ ```yaml
+ [ plugin:python.d ]
+ # update every = 1
+ command options = -ppython3
+ ```
+ configuration:
+ file:
+ name: python.d/changefinder.conf
+ description: ""
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: charts_regex
+ description: what charts to pull data for - A regex like `system\..*|` or `system\..*|apps.cpu|apps.mem` etc.
+ default_value: "system\\..*"
+ required: true
+ - name: charts_to_exclude
+ description: |
+ charts to exclude, useful if you would like to exclude some specific charts.
+ note: should be a ',' separated string like 'chart.name,chart.name'.
+ default_value: ""
+ required: false
+ - name: mode
+ description: get ChangeFinder scores 'per_dim' or 'per_chart'.
+ default_value: "per_chart"
+ required: true
+ - name: cf_r
+ description: default parameters that can be passed to the changefinder library.
+ default_value: 0.5
+ required: false
+ - name: cf_order
+ description: default parameters that can be passed to the changefinder library.
+ default_value: 1
+ required: false
+ - name: cf_smooth
+ description: default parameters that can be passed to the changefinder library.
+ default_value: 15
+ required: false
+ - name: cf_threshold
+ description: the percentile above which scores will be flagged.
+ default_value: 99
+ required: false
+ - name: n_score_samples
+ description: the number of recent scores to use when calculating the percentile of the changefinder score.
+ default_value: 14400
+ required: false
+ - name: show_scores
+ description: |
+ set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time)
+ default_value: false
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Default
+ description: Default configuration.
+ folding:
+ enabled: false
+ config: |
+ local:
+ name: 'local'
+ host: '127.0.0.1:19999'
+ charts_regex: 'system\..*'
+ charts_to_exclude: ''
+ mode: 'per_chart'
+ cf_r: 0.5
+ cf_order: 1
+ cf_smooth: 15
+ cf_threshold: 99
+ n_score_samples: 14400
+ show_scores: false
+ troubleshooting:
+ problems:
+ list:
+ - name: "Debug Mode"
+ description: |
+ If you would like to see more detail, log in as the `netdata` user and run the collector in debug mode.
+
+ ```bash
+ # become netdata user
+ sudo su -s /bin/bash netdata
+ # run collector in debug using `nolock` option if netdata is already running the collector itself.
+ /usr/libexec/netdata/plugins.d/python.d.plugin changefinder debug trace nolock
+ ```
+ - name: "Log Messages"
+ description: |
+ To see any relevant log messages you can use a command like below.
+
+ ```bash
+ grep 'changefinder' /var/log/netdata/error.log
+ grep 'changefinder' /var/log/netdata/collector.log
+ ```
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: ""
+ labels: []
+ metrics:
+ - name: changefinder.scores
+ description: ChangeFinder
+ unit: "score"
+ chart_type: line
+ dimensions:
+ - name: a dimension per chart
+ - name: changefinder.flags
+ description: ChangeFinder
+ unit: "flag"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per chart
diff --git a/collectors/python.d.plugin/dovecot/Makefile.inc b/collectors/python.d.plugin/dovecot/Makefile.inc
new file mode 100644
index 00000000..fd7d13bb
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += dovecot/dovecot.chart.py
+dist_pythonconfig_DATA += dovecot/dovecot.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += dovecot/README.md dovecot/Makefile.inc
+
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
new file mode 120000
index 00000000..c4749ced
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -0,0 +1 @@
+integrations/dovecot.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
new file mode 100644
index 00000000..dfaef28b
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# Description: dovecot netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+UNIX_SOCKET = '/var/run/dovecot/stats'
+
+ORDER = [
+ 'sessions',
+ 'logins',
+ 'commands',
+ 'faults',
+ 'context_switches',
+ 'io',
+ 'net',
+ 'syscalls',
+ 'lookup',
+ 'cache',
+ 'auth',
+ 'auth_cache'
+]
+
+CHARTS = {
+ 'sessions': {
+ 'options': [None, 'Dovecot Active Sessions', 'number', 'sessions', 'dovecot.sessions', 'line'],
+ 'lines': [
+ ['num_connected_sessions', 'active sessions', 'absolute']
+ ]
+ },
+ 'logins': {
+ 'options': [None, 'Dovecot Logins', 'number', 'logins', 'dovecot.logins', 'line'],
+ 'lines': [
+ ['num_logins', 'logins', 'absolute']
+ ]
+ },
+ 'commands': {
+ 'options': [None, 'Dovecot Commands', 'commands', 'commands', 'dovecot.commands', 'line'],
+ 'lines': [
+ ['num_cmds', 'commands', 'absolute']
+ ]
+ },
+ 'faults': {
+ 'options': [None, 'Dovecot Page Faults', 'faults', 'page faults', 'dovecot.faults', 'line'],
+ 'lines': [
+ ['min_faults', 'minor', 'absolute'],
+ ['maj_faults', 'major', 'absolute']
+ ]
+ },
+ 'context_switches': {
+ 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches',
+ 'line'],
+ 'lines': [
+ ['vol_cs', 'voluntary', 'absolute'],
+ ['invol_cs', 'involuntary', 'absolute']
+ ]
+ },
+ 'io': {
+ 'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'],
+ 'lines': [
+ ['disk_input', 'read', 'incremental', 1, 1024],
+ ['disk_output', 'write', 'incremental', -1, 1024]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
+ 'lines': [
+ ['read_bytes', 'read', 'incremental', 8, 1000],
+ ['write_bytes', 'write', 'incremental', -8, 1000]
+ ]
+ },
+ 'syscalls': {
+ 'options': [None, 'Dovecot Number of SysCalls', 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
+ 'lines': [
+ ['read_count', 'read', 'incremental'],
+ ['write_count', 'write', 'incremental']
+ ]
+ },
+ 'lookup': {
+ 'options': [None, 'Dovecot Lookups', 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
+ 'lines': [
+ ['mail_lookup_path', 'path', 'incremental'],
+ ['mail_lookup_attr', 'attr', 'incremental']
+ ]
+ },
+ 'cache': {
+ 'options': [None, 'Dovecot Cache Hits', 'hits/s', 'cache', 'dovecot.cache', 'line'],
+ 'lines': [
+ ['mail_cache_hits', 'hits', 'incremental']
+ ]
+ },
+ 'auth': {
+ 'options': [None, 'Dovecot Authentications', 'attempts', 'logins', 'dovecot.auth', 'stacked'],
+ 'lines': [
+ ['auth_successes', 'ok', 'absolute'],
+ ['auth_failures', 'failed', 'absolute']
+ ]
+ },
+ 'auth_cache': {
+ 'options': [None, 'Dovecot Authentication Cache', 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
+ 'lines': [
+ ['auth_cache_hits', 'hit', 'absolute'],
+ ['auth_cache_misses', 'miss', 'absolute']
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = None # localhost
+ self.port = None # 24242
+ self.unix_socket = UNIX_SOCKET
+ self.request = 'EXPORT\tglobal\r\n'
+
+ def _get_data(self):
+ """
+ Format data received from socket
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()
+ except (ValueError, AttributeError):
+ return None
+
+ if raw is None:
+ self.debug('dovecot returned no data')
+ return None
+
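+ # the stats reply is two tab-separated lines: field names first, then their values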
+ data = raw.split('\n')[:2]
+ desc = data[0].split('\t')
+ vals = data[1].split('\t')
+ ret = dict()
+ for i, _ in enumerate(desc):
+ try:
+ ret[str(desc[i])] = int(vals[i])
+ except ValueError:
+ continue
+ return ret or None
diff --git a/collectors/python.d.plugin/dovecot/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf
new file mode 100644
index 00000000..451dbc9a
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/dovecot.conf
@@ -0,0 +1,98 @@
+# netdata python.d.plugin configuration for dovecot
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, dovecot also supports the following:
+#
+# socket: 'path/to/dovecot/stats'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 24242
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+
+localsocket_old:
+ name : 'local'
+ socket : '/var/run/dovecot/old-stats'
+
diff --git a/collectors/python.d.plugin/dovecot/integrations/dovecot.md b/collectors/python.d.plugin/dovecot/integrations/dovecot.md
new file mode 100644
index 00000000..4e795276
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/integrations/dovecot.md
@@ -0,0 +1,197 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dovecot/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dovecot/metadata.yaml"
+sidebar_label: "Dovecot"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Mail Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dovecot
+
+
+<img src="https://netdata.cloud/img/dovecot.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: dovecot
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.
+
+It uses the dovecot socket and executes the `EXPORT global` command to get the statistics.
+
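+As a rough sketch (not the module itself) of that exchange, assuming the default socket path from the configuration below:
+
+```python
+import socket
+
+s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+s.connect('/var/run/dovecot/stats')
+s.sendall(b'EXPORT\tglobal\r\n')
+raw = s.recv(65536).decode()  # a single read is enough for a sketch
+s.close()
+
+# The reply is two tab-separated lines: field names, then their values.
+lines = raw.split('\n')[:2]
+names, values = lines[0].split('\t'), lines[1].split('\t')
+stats = {k: int(v) for k, v in zip(names, values) if v.isdigit()}
+print(stats.get('num_logins'), stats.get('num_cmds'))
+```
+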
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Dovecot instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dovecot.sessions | active sessions | number |
+| dovecot.logins | logins | number |
+| dovecot.commands | commands | commands |
+| dovecot.faults | minor, major | faults |
+| dovecot.context_switches | voluntary, involuntary | switches |
+| dovecot.io | read, write | KiB/s |
+| dovecot.net | read, write | kilobits/s |
+| dovecot.syscalls | read, write | syscalls/s |
+| dovecot.lookup | path, attr | number/s |
+| dovecot.cache | hits | hits/s |
+| dovecot.auth | ok, failed | attempts |
+| dovecot.auth_cache | hit, miss | number |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Dovecot configuration
+
+The Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/dovecot.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/dovecot.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| socket | Use this socket to communicate with Dovecot | /var/run/dovecot/stats | no |
+| host | Instead of using a socket, you can point the collector to an IP address for Dovecot statistics. | | no |
+| port | Used in combination with host, configures the port Dovecot listens on. | | no |
+
+</details>
+
+#### Examples
+
+##### Local TCP
+
+A basic TCP configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+localtcpip:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 24242
+
+```
+</details>
+
+##### Local socket
+
+A basic local socket configuration
+
+<details><summary>Config</summary>
+
+```yaml
+localsocket:
+ name: 'local'
+ socket: '/var/run/dovecot/stats'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `dovecot` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin dovecot debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/dovecot/metadata.yaml b/collectors/python.d.plugin/dovecot/metadata.yaml
new file mode 100644
index 00000000..b247da84
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/metadata.yaml
@@ -0,0 +1,207 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: dovecot
+ monitored_instance:
+ name: Dovecot
+ link: 'https://www.dovecot.org/'
+ categories:
+ - data-collection.mail-servers
+ icon_filename: 'dovecot.svg'
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ''
+ keywords:
+ - dovecot
+ - imap
+ - mail
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: 'This collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.'
+ method_description: 'It uses the dovecot socket and executes the `EXPORT global` command to get the statistics.'
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ''
+ default_behavior:
+ auto_detection:
+          description: 'If no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`.'
+ limits:
+ description: ''
+ performance_impact:
+ description: ''
+ setup:
+ prerequisites:
+ list:
+ - title: 'Dovecot configuration'
+ description: The Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.
+ configuration:
+ file:
+ name: python.d/dovecot.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ''
+ required: false
+ - name: socket
+            description: Use this socket to communicate with Dovecot.
+ default_value: /var/run/dovecot/stats
+ required: false
+ - name: host
+            description: Instead of using a socket, you can point the collector to an IP address for Dovecot statistics.
+ default_value: ''
+ required: false
+ - name: port
+            description: Used in combination with host. Configures the port Dovecot listens on.
+ default_value: ''
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Local TCP
+ description: A basic TCP configuration.
+ config: |
+ localtcpip:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 24242
+ - name: Local socket
+            description: A basic local socket configuration.
+ config: |
+ localsocket:
+ name: 'local'
+ socket: '/var/run/dovecot/stats'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: dovecot.sessions
+ description: Dovecot Active Sessions
+ unit: "number"
+ chart_type: line
+ dimensions:
+ - name: active sessions
+ - name: dovecot.logins
+ description: Dovecot Logins
+ unit: "number"
+ chart_type: line
+ dimensions:
+ - name: logins
+ - name: dovecot.commands
+ description: Dovecot Commands
+ unit: "commands"
+ chart_type: line
+ dimensions:
+ - name: commands
+ - name: dovecot.faults
+ description: Dovecot Page Faults
+ unit: "faults"
+ chart_type: line
+ dimensions:
+ - name: minor
+ - name: major
+ - name: dovecot.context_switches
+ description: Dovecot Context Switches
+ unit: "switches"
+ chart_type: line
+ dimensions:
+ - name: voluntary
+ - name: involuntary
+ - name: dovecot.io
+ description: Dovecot Disk I/O
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: dovecot.net
+ description: Dovecot Network Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: dovecot.syscalls
+ description: Dovecot Number of SysCalls
+ unit: "syscalls/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: dovecot.lookup
+ description: Dovecot Lookups
+ unit: "number/s"
+ chart_type: stacked
+ dimensions:
+ - name: path
+ - name: attr
+ - name: dovecot.cache
+ description: Dovecot Cache Hits
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: dovecot.auth
+ description: Dovecot Authentications
+ unit: "attempts"
+ chart_type: stacked
+ dimensions:
+ - name: ok
+ - name: failed
+ - name: dovecot.auth_cache
+ description: Dovecot Authentication Cache
+ unit: "number"
+ chart_type: stacked
+ dimensions:
+ - name: hit
+ - name: miss
diff --git a/collectors/python.d.plugin/example/Makefile.inc b/collectors/python.d.plugin/example/Makefile.inc
new file mode 100644
index 00000000..1b027d5a
--- /dev/null
+++ b/collectors/python.d.plugin/example/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += example/example.chart.py
+dist_pythonconfig_DATA += example/example.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += example/README.md example/Makefile.inc
+
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
new file mode 120000
index 00000000..55877a99
--- /dev/null
+++ b/collectors/python.d.plugin/example/README.md
@@ -0,0 +1 @@
+integrations/example_collector.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
new file mode 100644
index 00000000..d6c0b665
--- /dev/null
+++ b/collectors/python.d.plugin/example/example.chart.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Description: example netdata python.d module
+# Author: Put your name here (your github login)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from random import SystemRandom
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+priority = 90000
+
+ORDER = [
+ 'random',
+]
+
+CHARTS = {
+ 'random': {
+ 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
+ 'lines': [
+ ['random1']
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.random = SystemRandom()
+ self.num_lines = self.configuration.get('num_lines', 4)
+ self.lower = self.configuration.get('lower', 0)
+ self.upper = self.configuration.get('upper', 100)
+
+ @staticmethod
+ def check():
+ return True
+
+ def get_data(self):
+ data = dict()
+
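+        # build one dimension per configured line (random0 .. random{num_lines - 1}),
+        # registering each new dimension on the chart the first time it is seen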
+ for i in range(0, self.num_lines):
+ dimension_id = ''.join(['random', str(i)])
+
+ if dimension_id not in self.charts['random']:
+ self.charts['random'].add_dimension([dimension_id])
+
+ data[dimension_id] = self.random.randint(self.lower, self.upper)
+
+ return data
diff --git a/collectors/python.d.plugin/example/example.conf b/collectors/python.d.plugin/example/example.conf
new file mode 100644
index 00000000..31261b84
--- /dev/null
+++ b/collectors/python.d.plugin/example/example.conf
@@ -0,0 +1,87 @@
+# netdata python.d.plugin configuration for example
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear on the dashboard
+#                             # (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, example also supports the following:
+#
+# num_lines: 4 # the number of lines to create
+# lower: 0 # the lower bound of numbers to randomly sample from
+# upper: 100 # the upper bound of numbers to randomly sample from
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+four_lines:
+ name: "Four Lines" # the JOB's name as it will appear on the dashboard
+ update_every: 1 # the JOB's data collection frequency
+ priority: 60000 # the JOB's order on the dashboard
+ penalty: yes # the JOB's penalty
+ autodetection_retry: 0 # the JOB's re-check interval in seconds
+ num_lines: 4 # the number of lines to create
+ lower: 0 # the lower bound of numbers to randomly sample from
+ upper: 100 # the upper bound of numbers to randomly sample from
+
+# if you wanted to make another job to run in addition to the one above then
+# you would just uncomment the job configuration below.
+# two_lines:
+# name: "Two Lines" # the JOB's name as it will appear on the dashboard
+# num_lines: 2 # the number of lines to create
+# lower: 50 # the lower bound of numbers to randomly sample from
+# upper: 75 # the upper bound of numbers to randomly sample from
diff --git a/collectors/python.d.plugin/example/integrations/example_collector.md b/collectors/python.d.plugin/example/integrations/example_collector.md
new file mode 100644
index 00000000..7dded67b
--- /dev/null
+++ b/collectors/python.d.plugin/example/integrations/example_collector.md
@@ -0,0 +1,171 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/example/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/example/metadata.yaml"
+sidebar_label: "Example collector"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Other"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Example collector
+
+Plugin: python.d.plugin
+Module: example
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Example collector that generates some random numbers as metrics.
+
+If you want to write your own collector, read our [writing a new Python module](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.
+
+
+The `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.
+
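+Conceptually, each update boils down to returning a dictionary that maps dimension ids to values. The snippet below is a simplified, standalone sketch of that idea (the `num_lines`, `lower` and `upper` parameters mirror the module's options of the same names; the real implementation lives in `example.chart.py`):
+
+```python
+from random import SystemRandom
+
+random = SystemRandom()
+
+
+def get_data(num_lines=4, lower=0, upper=100):
+    # one value per dimension: random0, random1, ..., random{num_lines - 1}
+    return {'random{0}'.format(i): random.randint(lower, upper) for i in range(num_lines)}
+
+
+print(get_data())  # e.g. {'random0': 42, 'random1': 7, 'random2': 93, 'random3': 18}
+```
+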
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Example collector instance
+
+These metrics refer to the entire monitored application.
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| example.random | random | number |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/example.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/example.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| num_lines | The number of lines to create. | 4 | no |
+| lower | The lower bound of numbers to randomly sample from. | 0 | no |
+| upper | The upper bound of numbers to randomly sample from. | 100 | no |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+four_lines:
+ name: "Four Lines"
+ update_every: 1
+ priority: 60000
+ penalty: yes
+ autodetection_retry: 0
+ num_lines: 4
+ lower: 0
+ upper: 100
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `example` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin example debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/example/metadata.yaml b/collectors/python.d.plugin/example/metadata.yaml
new file mode 100644
index 00000000..eae84d9e
--- /dev/null
+++ b/collectors/python.d.plugin/example/metadata.yaml
@@ -0,0 +1,138 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: example
+ monitored_instance:
+ name: Example collector
+ link: https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/example/README.md
+ categories:
+ - data-collection.other
+ icon_filename: ""
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - example
+ - netdata
+ - python
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Example collector that generates some random numbers as metrics.
+
+ If you want to write your own collector, read our [writing a new Python module](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.
+ method_description: |
+ The `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: python.d/example.conf
+ description: ""
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: num_lines
+ description: The number of lines to create.
+ default_value: 4
+ required: false
+ - name: lower
+ description: The lower bound of numbers to randomly sample from.
+ default_value: 0
+ required: false
+ - name: upper
+ description: The upper bound of numbers to randomly sample from.
+ default_value: 100
+ required: false
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 1
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: Config
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ four_lines:
+ name: "Four Lines"
+ update_every: 1
+ priority: 60000
+ penalty: yes
+ autodetection_retry: 0
+ num_lines: 4
+ lower: 0
+ upper: 100
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: |
+ These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: example.random
+ description: A random number
+ unit: number
+ chart_type: line
+ dimensions:
+ - name: random
diff --git a/collectors/python.d.plugin/exim/Makefile.inc b/collectors/python.d.plugin/exim/Makefile.inc
new file mode 100644
index 00000000..36ffa56d
--- /dev/null
+++ b/collectors/python.d.plugin/exim/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += exim/exim.chart.py
+dist_pythonconfig_DATA += exim/exim.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += exim/README.md exim/Makefile.inc
+
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
new file mode 120000
index 00000000..f1f2ef9f
--- /dev/null
+++ b/collectors/python.d.plugin/exim/README.md
@@ -0,0 +1 @@
+integrations/exim.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
new file mode 100644
index 00000000..7238a1be
--- /dev/null
+++ b/collectors/python.d.plugin/exim/exim.chart.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Description: exim netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+EXIM_COMMAND = 'exim -bpc'
+
+ORDER = [
+ 'qemails',
+]
+
+CHARTS = {
+ 'qemails': {
+ 'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'],
+ 'lines': [
+ ['emails', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = EXIM_COMMAND
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
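+        # `exim -bpc` prints a single line with the number of queued messages;
+        # take the first line of the command output and convert it to an integer.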
+ try:
+ return {'emails': int(self._get_raw_data()[0])}
+ except (ValueError, AttributeError):
+ return None
diff --git a/collectors/python.d.plugin/exim/exim.conf b/collectors/python.d.plugin/exim/exim.conf
new file mode 100644
index 00000000..3b7e6592
--- /dev/null
+++ b/collectors/python.d.plugin/exim/exim.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for exim
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# exim is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, exim also supports the following:
+#
+# command: 'exim -bpc' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# REQUIRED exim CONFIGURATION
+#
+# netdata will query exim as user netdata.
+# By default exim will refuse to respond.
+#
+# To allow querying exim as non-admin user, please set the following
+# to your exim configuration:
+#
+# queue_list_requires_admin = false
+#
+# Your exim configuration should be in
+#
+# /etc/exim/exim4.conf
+# or
+# /etc/exim4/conf.d/main/000_local_options
+#
+# Please consult your distribution information to find the exact file.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'exim -bpc'
diff --git a/collectors/python.d.plugin/exim/integrations/exim.md b/collectors/python.d.plugin/exim/integrations/exim.md
new file mode 100644
index 00000000..f0ae33d3
--- /dev/null
+++ b/collectors/python.d.plugin/exim/integrations/exim.md
@@ -0,0 +1,181 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/exim/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/exim/metadata.yaml"
+sidebar_label: "Exim"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Mail Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Exim
+
+
+<img src="https://netdata.cloud/img/exim.jpg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: exim
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Exim mail queue.
+
+It uses the `exim` command line binary to get the statistics.
+
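+As a rough illustration of the mechanism (a standalone sketch, not the module itself), `exim -bpc` prints a single line containing the number of messages currently queued, so collecting the metric amounts to running the command and parsing that number:
+
+```python
+import subprocess
+
+
+def queued_emails():
+    # `exim -bpc` prints the number of messages currently in the queue as a single line
+    out = subprocess.check_output(['exim', '-bpc'])
+    return int(out.decode().strip())
+
+
+print(queued_emails())  # e.g. 0 on an idle server
+```
+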
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+Assuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Exim instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exim.qemails | emails | emails |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Exim configuration - local installation
+
+The module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary. We solve that by adding the `queue_list_requires_admin` statement to the exim configuration and setting it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.
+
+1. Edit the `exim` configuration with your preferred editor and add:
+`queue_list_requires_admin = false`
+2. Restart `exim` and Netdata
+
+
+#### Exim configuration - WHM (CPanel) server
+
+On a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.
+
+1. Login to WHM
+2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor
+3. Scroll down to the button **Add additional configuration setting** and click on it.
+4. In the new dropdown which appears above, find and choose:
+`queue_list_requires_admin` and set it to `false`
+5. Scroll to the end and click the **Save** button.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/exim.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/exim.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| command | Path and command to the `exim` binary | exim -bpc | no |
+
+</details>
+
+#### Examples
+
+##### Local exim install
+
+A basic local exim install.
+
+```yaml
+local:
+ command: 'exim -bpc'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `exim` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin exim debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/exim/metadata.yaml b/collectors/python.d.plugin/exim/metadata.yaml
new file mode 100644
index 00000000..a8be02d9
--- /dev/null
+++ b/collectors/python.d.plugin/exim/metadata.yaml
@@ -0,0 +1,132 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: exim
+ monitored_instance:
+ name: Exim
+ link: "https://www.exim.org/"
+ categories:
+ - data-collection.mail-servers
+ icon_filename: "exim.jpg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - exim
+ - mail
+ - server
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors Exim mail queue."
+ method_description: "It uses the `exim` command line binary to get the statistics."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "Assuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "Exim configuration - local installation"
+ description: |
+              The module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary. We solve that by adding the `queue_list_requires_admin` statement to the exim configuration and setting it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.
+
+              1. Edit the `exim` configuration with your preferred editor and add:
+              `queue_list_requires_admin = false`
+              2. Restart `exim` and Netdata
+ - title: "Exim configuration - WHM (CPanel) server"
+ description: |
+ On a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.
+
+ 1. Login to WHM
+ 2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor
+ 3. Scroll down to the button **Add additional configuration setting** and click on it.
+              4. In the new dropdown which appears above, find and choose:
+              `queue_list_requires_admin` and set it to `false`
+ 5. Scroll to the end and click the **Save** button.
+ configuration:
+ file:
+ name: python.d/exim.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: command
+ description: Path and command to the `exim` binary
+ default_value: "exim -bpc"
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Local exim install
+            description: A basic local exim install.
+ config: |
+ local:
+ command: 'exim -bpc'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: exim.qemails
+ description: Exim Queue Emails
+ unit: "emails"
+ chart_type: line
+ dimensions:
+ - name: emails
diff --git a/collectors/python.d.plugin/fail2ban/Makefile.inc b/collectors/python.d.plugin/fail2ban/Makefile.inc
new file mode 100644
index 00000000..31e117e5
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += fail2ban/fail2ban.chart.py
+dist_pythonconfig_DATA += fail2ban/fail2ban.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += fail2ban/README.md fail2ban/Makefile.inc
+
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
new file mode 120000
index 00000000..642a8bcf
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -0,0 +1 @@
+integrations/fail2ban.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
new file mode 100644
index 00000000..76f6d92b
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+# Description: fail2ban log netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from collections import defaultdict
+from glob import glob
+
+from bases.FrameworkServices.LogService import LogService
+
+ORDER = [
+ 'jails_failed_attempts',
+ 'jails_bans',
+ 'jails_banned_ips',
+]
+
+
+def charts(jails):
+ """
+ Chart definitions creating
+ """
+
+ ch = {
+ ORDER[0]: {
+ 'options': [None, 'Failed attempts', 'attempts/s', 'failed attempts', 'fail2ban.failed_attempts', 'line'],
+ 'lines': []
+ },
+ ORDER[1]: {
+ 'options': [None, 'Bans', 'bans/s', 'bans', 'fail2ban.bans', 'line'],
+ 'lines': []
+ },
+ ORDER[2]: {
+ 'options': [None, 'Banned IP addresses (since the last restart of netdata)', 'ips', 'banned ips',
+ 'fail2ban.banned_ips', 'line'],
+ 'lines': []
+ },
+ }
+ for jail in jails:
+ dim = ['{0}_failed_attempts'.format(jail), jail, 'incremental']
+ ch[ORDER[0]]['lines'].append(dim)
+
+ dim = [jail, jail, 'incremental']
+ ch[ORDER[1]]['lines'].append(dim)
+
+ dim = ['{0}_in_jail'.format(jail), jail, 'absolute']
+ ch[ORDER[2]]['lines'].append(dim)
+
+ return ch
+
+
+RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= +(true|yes|false|no)')
+
+ACTION_BAN = 'Ban'
+ACTION_UNBAN = 'Unban'
+ACTION_RESTORE_BAN = 'Restore Ban'
+ACTION_FOUND = 'Found'
+
+# Example:
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Found 203.0.113.1
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 203.0.113.1
+# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Restore Ban 203.0.113.1
+# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 203.0.113.1
+RE_DATA = re.compile(
+ r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>{0}|{1}|{2}|{3}) (?P<ip>[a-f0-9.:]+)'.format(
+ ACTION_BAN, ACTION_UNBAN, ACTION_RESTORE_BAN, ACTION_FOUND
+ )
+)
+
+DEFAULT_JAILS = [
+ 'ssh',
+]
+
+
+class Service(LogService):
+ def __init__(self, configuration=None, name=None):
+ LogService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = dict()
+ self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
+ self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
+ self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
+ self.exclude = self.configuration.get('exclude', str())
+ self.monitoring_jails = list()
+ self.banned_ips = defaultdict(set)
+ self.data = dict()
+
+ def check(self):
+ """
+ :return: bool
+ """
+ if not self.conf_path.endswith(('.conf', '.local')):
+ self.error('{0} is a wrong conf path name, must be *.conf or *.local'.format(self.conf_path))
+ return False
+
+ if not os.access(self.log_path, os.R_OK):
+ self.error('{0} is not readable'.format(self.log_path))
+ return False
+
+ if os.path.getsize(self.log_path) == 0:
+ self.error('{0} is empty'.format(self.log_path))
+ return False
+
+ self.monitoring_jails = self.jails_auto_detection()
+ for jail in self.monitoring_jails:
+ self.data['{0}_failed_attempts'.format(jail)] = 0
+ self.data[jail] = 0
+ self.data['{0}_in_jail'.format(jail)] = 0
+
+ self.definitions = charts(self.monitoring_jails)
+ self.info('monitoring jails: {0}'.format(self.monitoring_jails))
+
+ return True
+
+ def get_data(self):
+ """
+ :return: dict
+ """
+ raw = self._get_raw_data()
+
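+        # None means the log could not be read (report a failure); an empty list just
+        # means there are no new lines since the last poll, so re-send the current totals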
+ if not raw:
+ return None if raw is None else self.data
+
+ for row in raw:
+ match = RE_DATA.search(row)
+
+ if not match:
+ continue
+
+ match = match.groupdict()
+
+ if match['jail'] not in self.monitoring_jails:
+ continue
+
+ jail, action, ip = match['jail'], match['action'], match['ip']
+
+ if action == ACTION_FOUND:
+ self.data['{0}_failed_attempts'.format(jail)] += 1
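+            # 'Restore Ban' entries (bans re-applied after a fail2ban restart) count as bans too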
+ elif action in (ACTION_BAN, ACTION_RESTORE_BAN):
+ self.data[jail] += 1
+ if ip not in self.banned_ips[jail]:
+ self.banned_ips[jail].add(ip)
+ self.data['{0}_in_jail'.format(jail)] += 1
+ elif action == ACTION_UNBAN:
+ if ip in self.banned_ips[jail]:
+ self.banned_ips[jail].remove(ip)
+ self.data['{0}_in_jail'.format(jail)] -= 1
+
+ return self.data
+
+ def get_files_from_dir(self, dir_path, suffix):
+ """
+ :return: list
+ """
+ if not os.path.isdir(dir_path):
+ self.error('{0} is not a directory'.format(dir_path))
+ return list()
+
+ return glob('{0}/*.{1}'.format(self.conf_dir, suffix))
+
+ def get_jails_from_file(self, file_path):
+ """
+ :return: list
+ """
+ if not os.access(file_path, os.R_OK):
+            self.error('{0} is not readable or does not exist'.format(file_path))
+ return list()
+
+ with open(file_path, 'rt') as f:
+ lines = f.readlines()
+ raw = ' '.join(line for line in lines if line.startswith(('[', 'enabled')))
+
+ match = RE_JAILS.findall(raw)
+ # Result: [('ssh', 'true'), ('dropbear', 'true'), ('pam-generic', 'true'), ...]
+
+ if not match:
+ self.debug('{0} parse failed'.format(file_path))
+ return list()
+
+ return match
+
+ def jails_auto_detection(self):
+ """
+ :return: list
+
+ Parses jail configuration files. Returns list of enabled jails.
+ According man jail.conf parse order must be
+ * jail.conf
+ * jail.d/*.conf (in alphabetical order)
+ * jail.local
+ * jail.d/*.local (in alphabetical order)
+ """
+ jails_files, all_jails, active_jails = list(), list(), list()
+
+ jails_files.append('{0}.conf'.format(self.conf_path.rsplit('.')[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'conf'))
+ jails_files.append('{0}.local'.format(self.conf_path.rsplit('.')[0]))
+ jails_files.extend(self.get_files_from_dir(self.conf_dir, 'local'))
+
+ self.debug('config files to parse: {0}'.format(jails_files))
+
+ for f in jails_files:
+ all_jails.extend(self.get_jails_from_file(f))
+
+ exclude = self.exclude.split()
+
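+        # files are parsed in the order listed above, so a jail enabled in jail.conf
+        # can still be disabled again by jail.local or a jail.d/*.local override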
+ for name, status in all_jails:
+ if name in exclude:
+ continue
+
+ if status in ('true', 'yes') and name not in active_jails:
+ active_jails.append(name)
+ elif status in ('false', 'no') and name in active_jails:
+ active_jails.remove(name)
+
+ return active_jails or DEFAULT_JAILS
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
new file mode 100644
index 00000000..a36436b5
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for fail2ban
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, fail2ban also supports the following:
+#
+# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log'
+# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local'
+# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/'
+# exclude: 'jails you want to exclude from autodetection' # Default: none
+#------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md b/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md
new file mode 100644
index 00000000..a7116be5
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md
@@ -0,0 +1,209 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/fail2ban/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/fail2ban/metadata.yaml"
+sidebar_label: "Fail2ban"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Fail2ban
+
+
+<img src="https://netdata.cloud/img/fail2ban.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: fail2ban
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Fail2ban performance for prime intrusion prevention operations. Monitor ban counts, jail statuses, and failed login attempts to ensure robust network security.
+
+
+It collects metrics through reading the default log and configuration files of fail2ban.
+
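+As a rough illustration of the mechanism (a simplified sketch, not the module's exact code), each relevant `fail2ban.log` line carries a jail name, an action (`Found`, `Ban`, `Restore Ban` or `Unban`) and an IP address, which the collector extracts with a regular expression:
+
+```python
+import re
+
+# sample line format taken from the module's own comments
+LINE = '2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 203.0.113.1'
+
+# jail name in brackets, then the action keyword, then the address
+RE = re.compile(r'\[(?P<jail>[A-Za-z0-9_-]+)\] (?P<action>Found|Ban|Restore Ban|Unban) (?P<ip>[a-f0-9.:]+)')
+
+print(RE.search(LINE).groupdict())  # {'jail': 'ssh', 'action': 'Ban', 'ip': '203.0.113.1'}
+```
+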
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+The `fail2ban.log` file must be readable by the user `netdata`.
+ - change the file ownership and access permissions.
+ - update `/etc/logrotate.d/fail2ban` to persist the changes after rotating the log file.
+
+To change the file ownership and access permissions, execute the following:
+
+```shell
+sudo chown root:netdata /var/log/fail2ban.log
+sudo chmod 640 /var/log/fail2ban.log
+```
+
+To persist the changes after rotating the log file, add `create 640 root netdata` to the `/etc/logrotate.d/fail2ban`:
+
+```shell
+/var/log/fail2ban.log {
+
+ weekly
+ rotate 4
+ compress
+
+ delaycompress
+ missingok
+ postrotate
+ fail2ban-client flushlogs 1>/dev/null
+ endscript
+
+ # If fail2ban runs as non-root it still needs to have write access
+ # to logfiles.
+ # create 640 fail2ban adm
+ create 640 root netdata
+}
+```
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, the collector will attempt to read the log file at `/var/log/fail2ban.log` and the configuration file at `/etc/fail2ban/jail.local`.
+If the configuration file is not found, the default jail is `ssh`.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Fail2ban instance
+
+These metrics refer to the entire monitored application.
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| fail2ban.failed_attempts | a dimension per jail | attempts/s |
+| fail2ban.bans | a dimension per jail | bans/s |
+| fail2ban.banned_ips | a dimension per jail | ips |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/fail2ban.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/fail2ban.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| log_path | path to fail2ban.log. | /var/log/fail2ban.log | no |
+| conf_path | path to jail.local/jail.conf. | /etc/fail2ban/jail.local | no |
+| conf_dir | path to jail.d/. | /etc/fail2ban/jail.d/ | no |
+| exclude | jails you want to exclude from autodetection. | | no |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+local:
+ log_path: '/var/log/fail2ban.log'
+ conf_path: '/etc/fail2ban/jail.local'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `fail2ban` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin fail2ban debug trace
+ ```
+
diff --git a/collectors/python.d.plugin/fail2ban/metadata.yaml b/collectors/python.d.plugin/fail2ban/metadata.yaml
new file mode 100644
index 00000000..61f76267
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/metadata.yaml
@@ -0,0 +1,200 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: fail2ban
+ monitored_instance:
+ name: Fail2ban
+ link: https://www.fail2ban.org/
+ categories:
+ - data-collection.authentication-and-authorization
+ icon_filename: "fail2ban.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - fail2ban
+ - security
+ - authentication
+ - authorization
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Monitor Fail2ban performance for prime intrusion prevention operations. Monitor ban counts, jail statuses, and failed login attempts to ensure robust network security.
+ method_description: |
+ It collects metrics through reading the default log and configuration files of fail2ban.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: |
+ The `fail2ban.log` file must be readable by the user `netdata`.
+ - change the file ownership and access permissions.
+           - update `/etc/logrotate.d/fail2ban` to persist the changes after rotating the log file.
+
+ To change the file ownership and access permissions, execute the following:
+
+ ```shell
+ sudo chown root:netdata /var/log/fail2ban.log
+ sudo chmod 640 /var/log/fail2ban.log
+ ```
+
+ To persist the changes after rotating the log file, add `create 640 root netdata` to the `/etc/logrotate.d/fail2ban`:
+
+ ```shell
+ /var/log/fail2ban.log {
+
+ weekly
+ rotate 4
+ compress
+
+ delaycompress
+ missingok
+ postrotate
+ fail2ban-client flushlogs 1>/dev/null
+ endscript
+
+ # If fail2ban runs as non-root it still needs to have write access
+ # to logfiles.
+ # create 640 fail2ban adm
+ create 640 root netdata
+ }
+ ```
+ default_behavior:
+ auto_detection:
+ description: |
+            By default, the collector will attempt to read the log file at `/var/log/fail2ban.log` and the configuration file at `/etc/fail2ban/jail.local`.
+            If the configuration file is not found, the default jail is `ssh`.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: python.d/fail2ban.conf
+ description: ""
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: log_path
+ description: path to fail2ban.log.
+ default_value: /var/log/fail2ban.log
+ required: false
+ - name: conf_path
+ description: path to jail.local/jail.conf.
+ default_value: /etc/fail2ban/jail.local
+ required: false
+ - name: conf_dir
+ description: path to jail.d/.
+ default_value: /etc/fail2ban/jail.d/
+ required: false
+ - name: exclude
+ description: jails you want to exclude from autodetection.
+ default_value: ""
+ required: false
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 1
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: Config
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ local:
+ log_path: '/var/log/fail2ban.log'
+ conf_path: '/etc/fail2ban/jail.local'
+ troubleshooting:
+ problems:
+ list:
+ - name: Debug Mode
+ description: |
+ To troubleshoot issues with the `fail2ban` module, run the `python.d.plugin` with the debug option enabled.
+              The output will give you the results of the data collection job or error messages explaining why the collector isn't working.
+
+              First, navigate to your plugins' directory, which is usually located at `/usr/libexec/netdata/plugins.d/`. If that's
+ not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the
+ plugin's directory, switch to the `netdata` user.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ sudo su -s /bin/bash netdata
+ ```
+
+ Now you can manually run the `fail2ban` module in debug mode:
+
+ ```bash
+ ./python.d.plugin fail2ban debug trace
+ ```
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: |
+ These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: fail2ban.failed_attempts
+ description: Failed attempts
+ unit: "attempts/s"
+ chart_type: line
+ dimensions:
+ - name: a dimension per jail
+ - name: fail2ban.bans
+ description: Bans
+ unit: "bans/s"
+ chart_type: line
+ dimensions:
+ - name: a dimension per jail
+ - name: fail2ban.banned_ips
+ description: Banned IP addresses (since the last restart of netdata)
+ unit: "ips"
+ chart_type: line
+ dimensions:
+ - name: a dimension per jail
diff --git a/collectors/python.d.plugin/gearman/Makefile.inc b/collectors/python.d.plugin/gearman/Makefile.inc
new file mode 100644
index 00000000..275adf1c
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += gearman/gearman.chart.py
+dist_pythonconfig_DATA += gearman/gearman.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += gearman/README.md gearman/Makefile.inc
+
diff --git a/collectors/python.d.plugin/gearman/README.md b/collectors/python.d.plugin/gearman/README.md
new file mode 120000
index 00000000..70189d69
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/README.md
@@ -0,0 +1 @@
+integrations/gearman.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/gearman/gearman.chart.py b/collectors/python.d.plugin/gearman/gearman.chart.py
new file mode 100644
index 00000000..5e280a4d
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/gearman.chart.py
@@ -0,0 +1,243 @@
+# Description: gearman netdata python.d module
+# Author: Kyle Agronick (agronick)
+# SPDX-License-Identifier: GPL-3.0+
+
+# Gearman Netdata Plugin
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+CHARTS = {
+ 'total_workers': {
+ 'options': [None, 'Total Jobs', 'Jobs', 'Total Jobs', 'gearman.total_jobs', 'line'],
+ 'lines': [
+ ['total_pending', 'Pending', 'absolute'],
+ ['total_running', 'Running', 'absolute'],
+ ]
+ },
+}
+
+
+def job_chart_template(job_name):
+ return {
+ 'options': [None, job_name, 'Jobs', 'Activity by Job', 'gearman.single_job', 'stacked'],
+ 'lines': [
+ ['{0}_pending'.format(job_name), 'Pending', 'absolute'],
+ ['{0}_idle'.format(job_name), 'Idle', 'absolute'],
+ ['{0}_running'.format(job_name), 'Running', 'absolute'],
+ ]
+ }
+
+
+def build_result_dict(job):
+ """
+ Get the status for each job
+ :return: dict
+ """
+
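+    # job['metrics'] holds the three numeric columns from Gearman's status output: total queued jobs, running jobs, and available workers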
+ total, running, available = job['metrics']
+
+ idle = available - running
+ pending = total - running
+
+ return {
+ '{0}_pending'.format(job['job_name']): pending,
+ '{0}_idle'.format(job['job_name']): idle,
+ '{0}_running'.format(job['job_name']): running,
+ }
+
+
+def parse_worker_data(job):
+ job_name = job[0]
+ job_metrics = job[1:]
+
+ return {
+ 'job_name': job_name,
+ 'metrics': job_metrics,
+ }
+
+
+class GearmanReadException(BaseException):
+ pass
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
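+        # administrative protocol request; the reply mirrors the output of `gearadmin --status`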
+ self.request = "status\n"
+ self._keep_alive = True
+
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 4730)
+
+ self.tls = self.configuration.get('tls', False)
+ self.cert = self.configuration.get('cert', None)
+ self.key = self.configuration.get('key', None)
+
+ self.active_jobs = set()
+ self.definitions = deepcopy(CHARTS)
+ self.order = ['total_workers']
+
+ def _get_data(self):
+ """
+ Format data received from socket
+ :return: dict
+ """
+
+ try:
+ active_jobs = self.get_active_jobs()
+ except GearmanReadException:
+ return None
+
+ found_jobs, job_data = self.process_jobs(active_jobs)
+ self.remove_stale_jobs(found_jobs)
+ return job_data
+
+ def get_active_jobs(self):
+ active_jobs = []
+
+ for job in self.get_worker_data():
+ parsed_job = parse_worker_data(job)
+
+ # Gearman does not clean up old jobs
+ # We only care about jobs that have
+ # some relevant data
+ if not any(parsed_job['metrics']):
+ continue
+
+ active_jobs.append(parsed_job)
+
+ return active_jobs
+
+ def get_worker_data(self):
+ """
+ Split the data returned from Gearman
+ into a list of lists
+
+ This returns the same output that you
+ would get from a gearadmin --status
+ command.
+
+ Example output returned from
+ _get_raw_data():
+ prefix generic_worker4 78 78 500
+ generic_worker2 78 78 500
+ generic_worker3 0 0 760
+ generic_worker1 0 0 500
+
+ :return: list
+ """
+
+ try:
+ raw = self._get_raw_data()
+ except (ValueError, AttributeError):
+ raise GearmanReadException()
+
+ if raw is None:
+ self.debug("Gearman returned no data")
+ raise GearmanReadException()
+
+ workers = list()
+
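+        # each status line has the form '<function name> <total queued> <running> <available workers>'; splitlines()[:-1] drops the trailing '.' terminator line of the response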
+ for line in raw.splitlines()[:-1]:
+ parts = line.split()
+ if not parts:
+ continue
+
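+            # the function name itself may contain spaces, so everything except the last three numeric fields is treated as the name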
+ name = '_'.join(parts[:-3])
+ try:
+ values = [int(w) for w in parts[-3:]]
+ except ValueError:
+ continue
+
+ w = [name]
+ w.extend(values)
+ workers.append(w)
+
+ return workers
+
+ def process_jobs(self, active_jobs):
+
+ output = {
+ 'total_pending': 0,
+ 'total_idle': 0,
+ 'total_running': 0,
+ }
+ found_jobs = set()
+
+ for parsed_job in active_jobs:
+
+ job_name = self.add_job(parsed_job)
+ found_jobs.add(job_name)
+ job_data = build_result_dict(parsed_job)
+
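+            # fold this job's values into the totals; total_pending and total_running feed the 'Total Jobs' chart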
+ for sum_value in ('pending', 'running', 'idle'):
+ output['total_{0}'.format(sum_value)] += job_data['{0}_{1}'.format(job_name, sum_value)]
+
+ output.update(job_data)
+
+ return found_jobs, output
+
+ def remove_stale_jobs(self, active_job_list):
+ """
+ Removes jobs that have no workers, pending jobs,
+ or running jobs
+ :param active_job_list: The latest list of active jobs
+ :type active_job_list: iterable
+ :return: None
+ """
+
+ for to_remove in self.active_jobs - active_job_list:
+ self.remove_job(to_remove)
+
+ def add_job(self, parsed_job):
+ """
+ Adds a job to the list of active jobs
+ :param parsed_job: A parsed job dict
+ :type parsed_job: dict
+        :return: the name of the added job
+ """
+
+ def add_chart(job_name):
+ """
+ Adds a new job chart
+ :param job_name: The name of the job to add
+ :type job_name: string
+ :return: None
+ """
+
+ job_key = 'job_{0}'.format(job_name)
+ template = job_chart_template(job_name)
+ new_chart = self.charts.add_chart([job_key] + template['options'])
+ for dimension in template['lines']:
+ new_chart.add_dimension(dimension)
+
+ if parsed_job['job_name'] not in self.active_jobs:
+ add_chart(parsed_job['job_name'])
+ self.active_jobs.add(parsed_job['job_name'])
+
+ return parsed_job['job_name']
+
+ def remove_job(self, job_name):
+ """
+        Removes a job from the list of active jobs
+ :param job_name: The name of the job to remove
+ :type job_name: string
+ :return: None
+ """
+
+ def remove_chart(job_name):
+ """
+ Removes a job chart
+ :param job_name: The name of the job to remove
+ :type job_name: string
+ :return: None
+ """
+
+ job_key = 'job_{0}'.format(job_name)
+ self.charts[job_key].obsolete()
+ del self.charts[job_key]
+
+ remove_chart(job_name)
+ self.active_jobs.remove(job_name)
diff --git a/collectors/python.d.plugin/gearman/gearman.conf b/collectors/python.d.plugin/gearman/gearman.conf
new file mode 100644
index 00000000..635e893e
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/gearman.conf
@@ -0,0 +1,75 @@
+# netdata python.d.plugin configuration for gearman
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, gearman also supports the following:
+#
+# host: localhost # The host running the Gearman server
+# port: 4730 # Port of the Gearman server
+# tls: no # Whether to use TLS or not
+# cert: /path/to/cert # Path to cert if using TLS
+# key: /path/to/key # Path to key if using TLS
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOB
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 4730 \ No newline at end of file
diff --git a/collectors/python.d.plugin/gearman/integrations/gearman.md b/collectors/python.d.plugin/gearman/integrations/gearman.md
new file mode 100644
index 00000000..3923d140
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/integrations/gearman.md
@@ -0,0 +1,210 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/gearman/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/gearman/metadata.yaml"
+sidebar_label: "Gearman"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Distributed Computing Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Gearman
+
+
+<img src="https://netdata.cloud/img/gearman.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: gearman
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management.
+
+This collector connects to a Gearman instance via either TCP or unix socket.
+
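+As a rough illustration of how this works, the sketch below sends the same `status` request that the collector issues to Gearman's administrative port and prints the raw reply. It is only a verification aid, not part of the collector, and it assumes the default `localhost:4730` endpoint; adjust these values for your setup.
+
+```python
+import socket
+
+# query Gearman's administrative "status" command (the request this collector sends)
+with socket.create_connection(("localhost", 4730), timeout=5) as sock:  # assumed defaults
+    sock.sendall(b"status\n")
+    reply = b""
+    while not reply.endswith(b".\n"):  # the status reply ends with a lone "." line
+        chunk = sock.recv(4096)
+        if not chunk:
+            break
+        reply += chunk
+
+# each line: <function name> <total queued> <running> <available workers>
+print(reply.decode())
+```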
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+When no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Gearman instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| gearman.total_jobs | Pending, Running | Jobs |
+
+### Per gearman job
+
+Metrics related to Gearman jobs. Each job produces its own set of the following metrics.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| gearman.single_job | Pending, Idle, Running | Jobs |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ gearman_workers_queued ](https://github.com/netdata/netdata/blob/master/health/health.d/gearman.conf) | gearman.single_job | average number of queued jobs over the last 10 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+#### Socket permissions
+
+The gearman UNIX socket should have read permission for user netdata.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/gearman.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/gearman.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| host | URL or IP where gearman is running. | localhost | no |
+| port | Port of URL or IP where gearman is running. | 4730 | no |
+| tls | Use tls to connect to gearman. | false | no |
+| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no |
+| key | Provide a key file if needed to connect to a TLS gearman instance. | | no |
+
+</details>
+
+#### Examples
+
+##### Local gearman service
+
+A basic host and port gearman configuration for localhost.
+
+```yaml
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 4730
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 4730
+
+remote:
+ name: 'remote'
+ host: '192.0.2.1'
+ port: 4730
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `gearman` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin gearman debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/gearman/metadata.yaml b/collectors/python.d.plugin/gearman/metadata.yaml
new file mode 100644
index 00000000..f1760568
--- /dev/null
+++ b/collectors/python.d.plugin/gearman/metadata.yaml
@@ -0,0 +1,168 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: gearman
+ monitored_instance:
+ name: Gearman
+ link: "http://gearman.org/"
+ categories:
+ - data-collection.distributed-computing-systems
+ icon_filename: "gearman.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - gearman
+ - gearman job server
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management."
+ method_description: "This collector connects to a Gearman instance via either TCP or unix socket."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "When no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "Socket permissions"
+ description: The gearman UNIX socket should have read permission for user netdata.
+ configuration:
+ file:
+ name: python.d/gearman.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: host
+ description: URL or IP where gearman is running.
+ default_value: "localhost"
+ required: false
+ - name: port
+ description: Port of URL or IP where gearman is running.
+ default_value: "4730"
+ required: false
+ - name: tls
+ description: Use tls to connect to gearman.
+ default_value: "false"
+ required: false
+ - name: cert
+ description: Provide a certificate file if needed to connect to a TLS gearman instance.
+ default_value: ""
+ required: false
+ - name: key
+ description: Provide a key file if needed to connect to a TLS gearman instance.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Local gearman service
+ description: A basic host and port gearman configuration for localhost.
+ folding:
+ enabled: false
+ config: |
+ localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 4730
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 4730
+
+ remote:
+ name: 'remote'
+ host: '192.0.2.1'
+ port: 4730
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: gearman_workers_queued
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/gearman.conf
+ metric: gearman.single_job
+ info: average number of queued jobs over the last 10 minutes
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: gearman.total_jobs
+ description: Total Jobs
+ unit: "Jobs"
+ chart_type: line
+ dimensions:
+ - name: Pending
+ - name: Running
+ - name: gearman job
+ description: "Metrics related to Gearman jobs. Each job produces its own set of the following metrics."
+ labels: []
+ metrics:
+ - name: gearman.single_job
+ description: "{job_name}"
+ unit: "Jobs"
+ chart_type: stacked
+ dimensions:
+ - name: Pending
+ - name: Idle
+                - name: Running
diff --git a/collectors/python.d.plugin/go_expvar/Makefile.inc b/collectors/python.d.plugin/go_expvar/Makefile.inc
new file mode 100644
index 00000000..74f50d76
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += go_expvar/go_expvar.chart.py
+dist_pythonconfig_DATA += go_expvar/go_expvar.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += go_expvar/README.md go_expvar/Makefile.inc
+
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
new file mode 120000
index 00000000..f28a82f3
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -0,0 +1 @@
+integrations/go_applications_expvar.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
new file mode 100644
index 00000000..dca01081
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+# Description: go_expvar netdata python.d module
+# Author: Jan Kral (kralewitz)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import division
+
+import json
+from collections import namedtuple
+
+from bases.FrameworkServices.UrlService import UrlService
+
+MEMSTATS_ORDER = [
+ 'memstats_heap',
+ 'memstats_stack',
+ 'memstats_mspan',
+ 'memstats_mcache',
+ 'memstats_sys',
+ 'memstats_live_objects',
+ 'memstats_gc_pauses',
+]
+
+MEMSTATS_CHARTS = {
+ 'memstats_heap': {
+ 'options': ['heap', 'memory: size of heap memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.heap', 'line'],
+ 'lines': [
+ ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
+ ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_stack': {
+ 'options': ['stack', 'memory: size of stack memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.stack', 'line'],
+ 'lines': [
+ ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_mspan': {
+ 'options': ['mspan', 'memory: size of mspan memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.mspan', 'line'],
+ 'lines': [
+ ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_mcache': {
+ 'options': ['mcache', 'memory: size of mcache memory structures', 'KiB', 'memstats',
+ 'expvar.memstats.mcache', 'line'],
+ 'lines': [
+ ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_live_objects': {
+ 'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
+ 'expvar.memstats.live_objects', 'line'],
+ 'lines': [
+ ['memstats_live_objects', 'live']
+ ]
+ },
+ 'memstats_sys': {
+ 'options': ['sys', 'memory: size of reserved virtual address space', 'KiB', 'memstats',
+ 'expvar.memstats.sys', 'line'],
+ 'lines': [
+ ['memstats_sys', 'sys', 'absolute', 1, 1024]
+ ]
+ },
+ 'memstats_gc_pauses': {
+ 'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
+ 'expvar.memstats.gc_pauses', 'line'],
+ 'lines': [
+ ['memstats_gc_pauses', 'avg']
+ ]
+ }
+}
+
+EXPVAR = namedtuple(
+ "EXPVAR",
+ [
+ "key",
+ "type",
+ "id",
+ ]
+)
+
+
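+# flatten nested dicts into a single level, joining keys with `sep` (e.g. {'a': {'b': 1}} -> {'a.b': 1})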
+def flatten(d, top='', sep='.'):
+ items = []
+ for key, val in d.items():
+ nkey = top + sep + key if top else key
+ if isinstance(val, dict):
+ items.extend(flatten(val, nkey, sep=sep).items())
+ else:
+ items.append((nkey, val))
+ return dict(items)
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ # if memstats collection is enabled, add the charts and their order
+ if self.configuration.get('collect_memstats'):
+ self.definitions = dict(MEMSTATS_CHARTS)
+ self.order = list(MEMSTATS_ORDER)
+ else:
+ self.definitions = dict()
+ self.order = list()
+
+ # if extra charts are defined, parse their config
+ extra_charts = self.configuration.get('extra_charts')
+ if extra_charts:
+ self._parse_extra_charts_config(extra_charts)
+
+ def check(self):
+ """
+ Check if the module can collect data:
+ 1) At least one JOB configuration has to be specified
+ 2) The JOB configuration needs to define the URL and either collect_memstats must be enabled or at least one
+ extra_chart must be defined.
+
+ The configuration and URL check is provided by the UrlService class.
+ """
+
+ if not (self.configuration.get('extra_charts') or self.configuration.get('collect_memstats')):
+ self.error('Memstats collection is disabled and no extra_charts are defined, disabling module.')
+ return False
+
+ return UrlService.check(self)
+
+ def _parse_extra_charts_config(self, extra_charts_config):
+
+ # a place to store the expvar keys and their types
+ self.expvars = list()
+
+ for chart in extra_charts_config:
+
+ chart_dict = dict()
+ chart_id = chart.get('id')
+ chart_lines = chart.get('lines')
+ chart_opts = chart.get('options', dict())
+
+ if not all([chart_id, chart_lines]):
+ self.info('Chart {0} has no ID or no lines defined, skipping'.format(chart))
+ continue
+
+ chart_dict['options'] = [
+ chart_opts.get('name', ''),
+ chart_opts.get('title', ''),
+ chart_opts.get('units', ''),
+ chart_opts.get('family', ''),
+ chart_opts.get('context', ''),
+ chart_opts.get('chart_type', 'line')
+ ]
+ chart_dict['lines'] = list()
+
+ # add the lines to the chart
+ for line in chart_lines:
+
+ ev_key = line.get('expvar_key')
+ ev_type = line.get('expvar_type')
+ line_id = line.get('id')
+
+ if not all([ev_key, ev_type, line_id]):
+ self.info('Line missing expvar_key, expvar_type, or line_id, skipping: {0}'.format(line))
+ continue
+
+ if ev_type not in ['int', 'float']:
+ self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
+ continue
+
+ # self.expvars[ev_key] = (ev_type, line_id)
+ self.expvars.append(EXPVAR(ev_key, ev_type, line_id))
+
+ chart_dict['lines'].append(
+ [
+ line.get('id', ''),
+ line.get('name', ''),
+ line.get('algorithm', ''),
+ line.get('multiplier', 1),
+ line.get('divisor', 100 if ev_type == 'float' else 1),
+ line.get('hidden', False)
+ ]
+ )
+
+ self.order.append(chart_id)
+ self.definitions[chart_id] = chart_dict
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ data = json.loads(raw_data)
+
+ expvars = dict()
+ if self.configuration.get('collect_memstats'):
+ expvars.update(self._parse_memstats(data))
+
+ if self.configuration.get('extra_charts'):
+ # the memstats part of the data has been already parsed, so we remove it before flattening and checking
+ # the rest of the data, thus avoiding needless iterating over the multiply nested memstats dict.
+ del (data['memstats'])
+ flattened = flatten(data)
+
+ for ev in self.expvars:
+ v = flattened.get(ev.key)
+
+ if v is None:
+ continue
+
+ try:
+ if ev.type == 'int':
+ expvars[ev.id] = int(v)
+ elif ev.type == 'float':
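+                        # floats are scaled by 100 before being sent to netdata (which stores integers); the default dimension divisor of 100 set in _parse_extra_charts_config restores the value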
+ expvars[ev.id] = float(v) * 100
+ except ValueError:
+ self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(ev.key, ev.type))
+ return None
+
+ return expvars
+
+ @staticmethod
+ def _parse_memstats(data):
+
+ memstats = data['memstats']
+
+ # calculate the number of live objects in memory
+ live_objs = int(memstats['Mallocs']) - int(memstats['Frees'])
+
+ # calculate GC pause times average
+ # the Go runtime keeps the last 256 GC pause durations in a circular buffer,
+ # so we need to filter out the 0 values before the buffer is filled
+ gc_pauses = memstats['PauseNs']
+ try:
+ gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
+ # no GC cycles have occurred yet
+ except ZeroDivisionError:
+ gc_pause_avg = 0
+
+ return {
+ 'memstats_heap_alloc': memstats['HeapAlloc'],
+ 'memstats_heap_inuse': memstats['HeapInuse'],
+ 'memstats_stack_inuse': memstats['StackInuse'],
+ 'memstats_mspan_inuse': memstats['MSpanInuse'],
+ 'memstats_mcache_inuse': memstats['MCacheInuse'],
+ 'memstats_sys': memstats['Sys'],
+ 'memstats_live_objects': live_objs,
+ 'memstats_gc_pauses': gc_pause_avg,
+ }
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
new file mode 100644
index 00000000..4b821cde
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf
@@ -0,0 +1,108 @@
+# netdata python.d.plugin configuration for go_expvar
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, this plugin also supports the following:
+#
+# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint
+#
+# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include
+# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# collect_memstats: true # enables charts for Go runtime's memory statistics
+# extra_charts: {} # defines extra data/charts to monitor, please see the example below
+#
+# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Please visit the module wiki page for more information on how to use the extra_charts variable:
+#
+# https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin/go_expvar
+#
+# Configuration example
+# ---------------------
+
+#app1:
+# name : 'app1'
+# url : 'http://127.0.0.1:8080/debug/vars'
+# collect_memstats: true
+# extra_charts:
+# - id: "runtime_goroutines"
+# options:
+# name: num_goroutines
+# title: "runtime: number of goroutines"
+# units: goroutines
+# family: runtime
+# context: expvar.runtime.goroutines
+# chart_type: line
+# lines:
+# - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+# - id: "foo_counters"
+# options:
+# name: counters
+# title: "some random counters"
+# units: awesomeness
+# family: counters
+# context: expvar.foo.counters
+# chart_type: line
+# lines:
+# - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+# - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+
diff --git a/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md b/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
new file mode 100644
index 00000000..8d61fa2a
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
@@ -0,0 +1,335 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/go_expvar/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/go_expvar/metadata.yaml"
+sidebar_label: "Go applications (EXPVAR)"
+learn_status: "Published"
+learn_rel_path: "Data Collection/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Go applications (EXPVAR)
+
+
+<img src="https://netdata.cloud/img/go.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: go_expvar
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.
+
+It connects via http to gather the metrics exposed via the `expvar` package.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Go applications (EXPVAR) instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| expvar.memstats.heap | alloc, inuse | KiB |
+| expvar.memstats.stack | inuse | KiB |
+| expvar.memstats.mspan | inuse | KiB |
+| expvar.memstats.mcache | inuse | KiB |
+| expvar.memstats.live_objects | live | objects |
+| expvar.memstats.sys | sys | KiB |
+| expvar.memstats.gc_pauses | avg | ns |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable the go_expvar collector
+
+The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
+
+
+#### Sample `expvar` usage in a Go application
+
+The `expvar` package exposes metrics over HTTP and is very easy to use.
+Consider this minimal sample below:
+
+```go
+package main
+
+import (
+ _ "expvar"
+ "net/http"
+)
+
+func main() {
+ http.ListenAndServe("127.0.0.1:8080", nil)
+}
+```
+
+When imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that
+exposes Go runtime's memory statistics in JSON format. You can inspect the output by opening
+the URL in your browser (or by using `wget` or `curl`).
+
+Sample output:
+
+```json
+{
+"cmdline": ["./expvar-demo-binary"],
+"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <omitted for brevity>}
+}
+```
+
+You can of course expose and monitor your own variables as well.
+Here is a sample Go application that exposes a few custom variables:
+
+```go
+package main
+
+import (
+ "expvar"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func main() {
+
+ tick := time.NewTicker(1 * time.Second)
+ num_go := expvar.NewInt("runtime.goroutines")
+ counters := expvar.NewMap("counters")
+ counters.Set("cnt1", new(expvar.Int))
+ counters.Set("cnt2", new(expvar.Float))
+
+ go http.ListenAndServe(":8080", nil)
+
+ for {
+ select {
+ case <- tick.C:
+ num_go.Set(int64(runtime.NumGoroutine()))
+ counters.Add("cnt1", 1)
+ counters.AddFloat("cnt2", 1.452)
+ }
+ }
+}
+```
+
+Apart from the runtime memory stats, this application publishes two counters and the
+number of currently running Goroutines and updates these stats every second.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/go_expvar.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/go_expvar.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |
+| user | If the URL is password protected, this is the username to use. | | no |
+| pass | If the URL is password protected, this is the password to use. | | no |
+| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |
+| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |
+
+</details>
+
+#### Examples
+
+##### Monitor a Go app1 application
+
+The example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.
+
+The `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.
+
+The `extra_charts` variable is a YaML list of Netdata chart definitions.
+Each chart definition has the following keys:
+
+```
+id: Netdata chart ID
+options: a key-value mapping of chart options
+lines: a list of line definitions
+```
+
+**Note: please do not use dots in the chart or line ID field.
+See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**
+
+Please see these two links to the official Netdata documentation for more information about the values:
+
+- [External plugins - charts](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#chart)
+- [Chart variables](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
+
+**Line definitions**
+
+Each chart can define multiple lines (dimensions).
+A line definition is a key-value mapping of line options.
+Each line can have the following options:
+
+```
+# mandatory
+expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
+expvar_type: value type; supported are "float" or "int"
+id: the id of this line/dimension in Netdata
+
+# optional - Netdata defaults are used if these options are not defined
+name: ''
+algorithm: absolute
+multiplier: 1
+divisor: 100 if expvar_type == float, 1 if expvar_type == int
+hidden: False
+```
+
+Please see the following link for more information about the options and their default values:
+[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#dimension)
+
+Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;
+All dicts in the resulting JSON document are then flattened to one level.
+Expvar names are joined together with '.' when flattening.
+
+Example:
+
+```
+{
+ "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
+ "runtime.goroutines": 5
+}
+```
+
+In the above case, the exported variables will be available under `runtime.goroutines`,
+`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
+the first defined key wins and all subsequent keys with the same name are ignored.
+
+
+```yaml
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts:
+ - id: "runtime_goroutines"
+ options:
+ name: num_goroutines
+ title: "runtime: number of goroutines"
+ units: goroutines
+ family: runtime
+ context: expvar.runtime.goroutines
+ chart_type: line
+ lines:
+ - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+ - id: "foo_counters"
+ options:
+ name: counters
+ title: "some random counters"
+ units: awesomeness
+ family: counters
+ context: expvar.foo.counters
+ chart_type: line
+ lines:
+ - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+ - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin go_expvar debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/go_expvar/metadata.yaml b/collectors/python.d.plugin/go_expvar/metadata.yaml
new file mode 100644
index 00000000..9419b024
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/metadata.yaml
@@ -0,0 +1,329 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: go_expvar
+ monitored_instance:
+ name: Go applications (EXPVAR)
+ link: "https://pkg.go.dev/expvar"
+ categories:
+ - data-collection.apm
+ icon_filename: "go.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - go
+ - expvar
+ - application
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts."
+ method_description: "It connects via http to gather the metrics exposed via the `expvar` package."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "Enable the go_expvar collector"
+ description: |
+ The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+ ```bash
+ cd /etc/netdata # Replace this path with your Netdata config directory, if different
+ sudo ./edit-config python.d.conf
+ ```
+
+ Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
+ - title: "Sample `expvar` usage in a Go application"
+ description: |
+ The `expvar` package exposes metrics over HTTP and is very easy to use.
+ Consider this minimal sample below:
+
+ ```go
+ package main
+
+ import (
+ _ "expvar"
+ "net/http"
+ )
+
+ func main() {
+ http.ListenAndServe("127.0.0.1:8080", nil)
+ }
+ ```
+
+              When imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that
+ exposes Go runtime's memory statistics in JSON format. You can inspect the output by opening
+ the URL in your browser (or by using `wget` or `curl`).
+
+ Sample output:
+
+ ```json
+ {
+ "cmdline": ["./expvar-demo-binary"],
+ "memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <omitted for brevity>}
+ }
+ ```
+
+ You can of course expose and monitor your own variables as well.
+ Here is a sample Go application that exposes a few custom variables:
+
+ ```go
+ package main
+
+ import (
+ "expvar"
+ "net/http"
+ "runtime"
+ "time"
+ )
+
+ func main() {
+
+ tick := time.NewTicker(1 * time.Second)
+ num_go := expvar.NewInt("runtime.goroutines")
+ counters := expvar.NewMap("counters")
+ counters.Set("cnt1", new(expvar.Int))
+ counters.Set("cnt2", new(expvar.Float))
+
+ go http.ListenAndServe(":8080", nil)
+
+ for {
+ select {
+ case <- tick.C:
+ num_go.Set(int64(runtime.NumGoroutine()))
+ counters.Add("cnt1", 1)
+ counters.AddFloat("cnt2", 1.452)
+ }
+ }
+ }
+ ```
+
+ Apart from the runtime memory stats, this application publishes two counters and the
+ number of currently running Goroutines and updates these stats every second.
+ configuration:
+ file:
+ name: python.d/go_expvar.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: url
+ description: the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
+ default_value: ""
+ required: true
+ - name: user
+ description: If the URL is password protected, this is the username to use.
+ default_value: ""
+ required: false
+ - name: pass
+ description: If the URL is password protected, this is the password to use.
+ default_value: ""
+ required: false
+ - name: collect_memstats
+ description: Enables charts for Go runtime's memory statistics.
+ default_value: ""
+ required: false
+ - name: extra_charts
+ description: Defines extra data/charts to monitor, please see the example below.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Monitor a Go app1 application
+ description: |
+ The example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.
+
+ The `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.
+
+ The `extra_charts` variable is a YaML list of Netdata chart definitions.
+ Each chart definition has the following keys:
+
+ ```
+ id: Netdata chart ID
+ options: a key-value mapping of chart options
+ lines: a list of line definitions
+ ```
+
+ **Note: please do not use dots in the chart or line ID field.
+ See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**
+
+ Please see these two links to the official Netdata documentation for more information about the values:
+
+ - [External plugins - charts](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#chart)
+ - [Chart variables](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
+
+ **Line definitions**
+
+ Each chart can define multiple lines (dimensions).
+ A line definition is a key-value mapping of line options.
+ Each line can have the following options:
+
+ ```
+ # mandatory
+ expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
+ expvar_type: value type; supported are "float" or "int"
+ id: the id of this line/dimension in Netdata
+
+ # optional - Netdata defaults are used if these options are not defined
+ name: ''
+ algorithm: absolute
+ multiplier: 1
+ divisor: 100 if expvar_type == float, 1 if expvar_type == int
+ hidden: False
+ ```
+
+ Please see the following link for more information about the options and their default values:
+ [External plugins - dimensions](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#dimension)
+
+ Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;
+ All dicts in the resulting JSON document are then flattened to one level.
+ Expvar names are joined together with '.' when flattening.
+
+ Example:
+
+ ```
+ {
+ "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
+ "runtime.goroutines": 5
+ }
+ ```
+
+ In the above case, the exported variables will be available under `runtime.goroutines`,
+ `counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
+ the first defined key wins and all subsequent keys with the same name are ignored.
+ config: |
+ app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts:
+ - id: "runtime_goroutines"
+ options:
+ name: num_goroutines
+ title: "runtime: number of goroutines"
+ units: goroutines
+ family: runtime
+ context: expvar.runtime.goroutines
+ chart_type: line
+ lines:
+ - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+ - id: "foo_counters"
+ options:
+ name: counters
+ title: "some random counters"
+ units: awesomeness
+ family: counters
+ context: expvar.foo.counters
+ chart_type: line
+ lines:
+ - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+ - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: expvar.memstats.heap
+ description: "memory: size of heap memory structures"
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: alloc
+ - name: inuse
+ - name: expvar.memstats.stack
+ description: "memory: size of stack memory structures"
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: inuse
+ - name: expvar.memstats.mspan
+ description: "memory: size of mspan memory structures"
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: inuse
+ - name: expvar.memstats.mcache
+ description: "memory: size of mcache memory structures"
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: inuse
+ - name: expvar.memstats.live_objects
+ description: "memory: number of live objects"
+ unit: "objects"
+ chart_type: line
+ dimensions:
+ - name: live
+ - name: expvar.memstats.sys
+ description: "memory: size of reserved virtual address space"
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: sys
+ - name: expvar.memstats.gc_pauses
+ description: "memory: average duration of GC pauses"
+ unit: "ns"
+ chart_type: line
+ dimensions:
+ - name: avg
diff --git a/collectors/python.d.plugin/haproxy/Makefile.inc b/collectors/python.d.plugin/haproxy/Makefile.inc
new file mode 100644
index 00000000..ad24deaa
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += haproxy/haproxy.chart.py
+dist_pythonconfig_DATA += haproxy/haproxy.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += haproxy/README.md haproxy/Makefile.inc
+
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
new file mode 100644
index 00000000..2fa203f6
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -0,0 +1,90 @@
+<!--
+title: "HAProxy monitoring with Netdata"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/haproxy/README.md"
+sidebar_label: "haproxy-python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Integrations/Monitor/Webapps"
+-->
+
+# HAProxy collector
+
+Monitors frontend and backend metrics such as bytes in, bytes out, current sessions, and current sessions in queue,
+as well as health metrics such as backend server status (a server health check should be configured in HAProxy).
+
+The plugin can obtain data from a URL or a Unix socket.
+
+Requirements:
+
+- The socket must be readable and writable by the `netdata` user.
+- The URL must have `stats uri <path>` present in the HAProxy config, otherwise you will get HTTP 503 in the HAProxy logs.
+
+It produces:
+
+1. **Frontend** family charts
+
+ - Kilobytes in/s
+ - Kilobytes out/s
+ - Sessions current
+ - Sessions in queue current
+
+2. **Backend** family charts
+
+ - Kilobytes in/s
+ - Kilobytes out/s
+ - Sessions current
+ - Sessions in queue current
+
+3. **Health** chart
+
+   - Number of failed servers for every backend (in DOWN state)
+
+## Configuration
+
+Edit the `python.d/haproxy.conf` configuration file using `edit-config` from the Netdata [config
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/haproxy.conf
+```
+
+Sample:
+
+```yaml
+via_url:
+  user: 'username' # ONLY IF stats auth is used
+  pass: 'password' # ONLY IF stats auth is used
+ url: 'http://ip.address:port/url;csv;norefresh'
+```
+
+OR
+
+```yaml
+via_socket:
+ socket: 'path/to/haproxy/sock'
+```
+
+If no configuration is given, the module will fail to run.
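+
+If you want to verify that the stats endpoint is reachable and returns CSV before enabling the module, a quick
+standalone check could look like the following sketch (the URL and credentials are placeholders; drop the
+`Authorization` header if stats auth is not used):
+
+```python
+import base64
+import urllib.request
+
+URL = 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'  # adjust to your setup
+USER, PASS = 'username', 'password'                        # only if stats auth is used
+
+request = urllib.request.Request(URL)
+token = base64.b64encode('{0}:{1}'.format(USER, PASS).encode()).decode()
+request.add_header('Authorization', 'Basic ' + token)
+
+with urllib.request.urlopen(request, timeout=5) as response:
+    # a working stats endpoint returns CSV whose header starts with '# pxname,svname,...'
+    print(response.read().decode().splitlines()[0])
+```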
+
+
+### Troubleshooting
+
+To troubleshoot issues with the `haproxy` module, run the `python.d.plugin` with the debug option enabled. The
+output will show the data collected by the job, or error messages explaining why the collector isn't working.
+
+First, navigate to your plugins directory, which is usually located at `/usr/libexec/netdata/plugins.d/`. If that's
+not the case on your system, open `netdata.conf` and look for the `plugins directory` setting. Once you're in the
+plugins directory, switch to the `netdata` user.
+
+```bash
+cd /usr/libexec/netdata/plugins.d/
+sudo su -s /bin/bash netdata
+```
+
+Now you can manually run the `haproxy` module in debug mode:
+
+```bash
+./python.d.plugin haproxy debug trace
+```
+
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
new file mode 100644
index 00000000..f412febb
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -0,0 +1,368 @@
+# -*- coding: utf-8 -*-
+# Description: haproxy netdata python.d module
+# Author: ilyam8, ktarasz
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import defaultdict
+from re import compile as re_compile
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.FrameworkServices.UrlService import UrlService
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'fbin',
+ 'fbout',
+ 'fscur',
+ 'fqcur',
+ 'fhrsp_1xx',
+ 'fhrsp_2xx',
+ 'fhrsp_3xx',
+ 'fhrsp_4xx',
+ 'fhrsp_5xx',
+ 'fhrsp_other',
+ 'fhrsp_total',
+ 'bbin',
+ 'bbout',
+ 'bscur',
+ 'bqcur',
+ 'bhrsp_1xx',
+ 'bhrsp_2xx',
+ 'bhrsp_3xx',
+ 'bhrsp_4xx',
+ 'bhrsp_5xx',
+ 'bhrsp_other',
+ 'bhrsp_total',
+ 'bqtime',
+ 'bttime',
+ 'brtime',
+ 'bctime',
+ 'health_sup',
+ 'health_sdown',
+ 'health_smaint',
+ 'health_bdown',
+ 'health_idle'
+]
+
+CHARTS = {
+ 'fbin': {
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'],
+ 'lines': []
+ },
+ 'fbout': {
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'],
+ 'lines': []
+ },
+ 'fscur': {
+ 'options': [None, 'Sessions Active', 'sessions', 'frontend', 'haproxy_f.scur', 'line'],
+ 'lines': []
+ },
+ 'fqcur': {
+ 'options': [None, 'Session In Queue', 'sessions', 'frontend', 'haproxy_f.qcur', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_1xx': {
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_2xx': {
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_3xx': {
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_4xx': {
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_5xx': {
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_other': {
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'frontend',
+ 'haproxy_f.hrsp_other', 'line'],
+ 'lines': []
+ },
+ 'fhrsp_total': {
+ 'options': [None, 'HTTP responses', 'responses', 'frontend', 'haproxy_f.hrsp_total', 'line'],
+ 'lines': []
+ },
+ 'bbin': {
+ 'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'],
+ 'lines': []
+ },
+ 'bbout': {
+ 'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'],
+ 'lines': []
+ },
+ 'bscur': {
+ 'options': [None, 'Sessions Active', 'sessions', 'backend', 'haproxy_b.scur', 'line'],
+ 'lines': []
+ },
+ 'bqcur': {
+ 'options': [None, 'Sessions In Queue', 'sessions', 'backend', 'haproxy_b.qcur', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_1xx': {
+ 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_1xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_2xx': {
+ 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_2xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_3xx': {
+ 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_3xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_4xx': {
+ 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_4xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_5xx': {
+ 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_5xx', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_other': {
+ 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'backend',
+ 'haproxy_b.hrsp_other', 'line'],
+ 'lines': []
+ },
+ 'bhrsp_total': {
+ 'options': [None, 'HTTP responses (total)', 'responses/s', 'backend', 'haproxy_b.hrsp_total', 'line'],
+ 'lines': []
+ },
+ 'bqtime': {
+ 'options': [None, 'The average queue time over the 1024 last requests', 'milliseconds', 'backend',
+ 'haproxy_b.qtime', 'line'],
+ 'lines': []
+ },
+ 'bctime': {
+ 'options': [None, 'The average connect time over the 1024 last requests', 'milliseconds', 'backend',
+ 'haproxy_b.ctime', 'line'],
+ 'lines': []
+ },
+ 'brtime': {
+ 'options': [None, 'The average response time over the 1024 last requests', 'milliseconds', 'backend',
+ 'haproxy_b.rtime', 'line'],
+ 'lines': []
+ },
+ 'bttime': {
+ 'options': [None, 'The average total session time over the 1024 last requests', 'milliseconds', 'backend',
+ 'haproxy_b.ttime', 'line'],
+ 'lines': []
+ },
+ 'health_sdown': {
+ 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'],
+ 'lines': []
+ },
+ 'health_sup': {
+ 'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'haproxy_hs.up', 'line'],
+ 'lines': []
+ },
+ 'health_smaint': {
+ 'options': [None, 'Backend Servers In MAINT State', 'maintenance servers', 'health', 'haproxy_hs.maint', 'line'],
+ 'lines': []
+ },
+ 'health_bdown': {
+ 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
+ 'lines': []
+ },
+ 'health_idle': {
+ 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'],
+ 'lines': [
+ ['idle', None, 'absolute']
+ ]
+ }
+}
+
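+# Map haproxy CSV stat fields to the netdata dimension algorithm and divisor, used for both frontends and backends.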
+METRICS = {
+ 'bin': {'algorithm': 'incremental', 'divisor': 1024},
+ 'bout': {'algorithm': 'incremental', 'divisor': 1024},
+ 'scur': {'algorithm': 'absolute', 'divisor': 1},
+ 'qcur': {'algorithm': 'absolute', 'divisor': 1},
+ 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
+ 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
+}
+
+BACKEND_METRICS = {
+ 'qtime': {'algorithm': 'absolute', 'divisor': 1},
+ 'ctime': {'algorithm': 'absolute', 'divisor': 1},
+ 'rtime': {'algorithm': 'absolute', 'divisor': 1},
+ 'ttime': {'algorithm': 'absolute', 'divisor': 1}
+}
+
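+# Regexes for extracting the idle percentage from the info source ('show info' over the socket, or the stats page over HTTP).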
+REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
+ socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
+
+
+# TODO: the code is unreadable
+class Service(UrlService, SocketService):
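+    # The service acts as either a UrlService or a SocketService, depending on whether the job
+    # configuration defines 'socket'. The chosen base class is stored in self.poll and is used to
+    # dispatch check() and _get_raw_data() calls explicitly.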
+ def __init__(self, configuration=None, name=None):
+ if 'socket' in configuration:
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.poll = SocketService
+ self.options_ = dict(regex=REGEX['socket'],
+ stat='show stat\n'.encode(),
+ info='show info\n'.encode())
+ else:
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.poll = UrlService
+ self.options_ = dict(regex=REGEX['url'],
+ stat=self.url,
+ info=url_remove_params(self.url))
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def check(self):
+ if self.poll.check(self):
+ self.create_charts()
+ self.info('We are using %s.' % self.poll.__name__)
+ return True
+ return False
+
+ def _get_data(self):
+ to_netdata = dict()
+ self.request, self.url = self.options_['stat'], self.options_['stat']
+ stat_data = self._get_stat_data()
+ self.request, self.url = self.options_['info'], self.options_['info']
+ info_data = self._get_info_data(regex=self.options_['regex'])
+
+ to_netdata.update(stat_data)
+ to_netdata.update(info_data)
+ return to_netdata or None
+
+ def _get_stat_data(self):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+
+ if not raw_data:
+ return dict()
+
+ raw_data = raw_data.splitlines()
+ self.data = parse_data_([dict(zip(raw_data[0].split(','), raw_data[_].split(',')))
+ for _ in range(1, len(raw_data))])
+ if not self.data:
+ return dict()
+
+ stat_data = dict()
+
+ for frontend in self.data['frontend']:
+ for metric in METRICS:
+ idx = frontend['# pxname'].replace('.', '_')
+ stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0
+
+ for backend in self.data['backend']:
+ name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_')
+ stat_data['hsup_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'UP')])
+ stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'DOWN')])
+ stat_data['hsmaint_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'MAINT')])
+ stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
+ for metric in BACKEND_METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ hrsp_total = 0
+ for metric in METRICS:
+ stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
+ if metric.startswith('hrsp_'):
+ hrsp_total += int(backend.get(metric) or 0)
+ stat_data['_'.join(['backend', 'hrsp_total', idx])] = hrsp_total
+ return stat_data
+
+ def _get_info_data(self, regex):
+ """
+ :return: dict
+ """
+ raw_data = self.poll._get_raw_data(self)
+ if not raw_data:
+ return dict()
+
+ match = regex.search(raw_data)
+ return match.groupdict() if match else dict()
+
+ @staticmethod
+ def _check_raw_data(data):
+ """
+ Check if all data has been gathered from socket
+ :param data: str
+ :return: boolean
+ """
+ return not bool(data)
+
+ def create_charts(self):
+ for front in self.data['frontend']:
+ name, idx = front['# pxname'], front['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]),
+ name, 'incremental', 1, 1])
+ for back in self.data['backend']:
+ name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
+ for metric in METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, METRICS[metric]['algorithm'], 1,
+ METRICS[metric]['divisor']])
+ self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]),
+ name, 'incremental', 1, 1])
+ for metric in BACKEND_METRICS:
+ self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
+ name, BACKEND_METRICS[metric]['algorithm'], 1,
+ BACKEND_METRICS[metric]['divisor']])
+ self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute'])
+ self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
+ self.definitions['health_smaint']['lines'].append(['hsmaint_' + idx, name, 'absolute'])
+ self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
+
+
+def parse_data_(data):
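+    """Group haproxy CSV stat rows into 'frontend', 'backend' and 'servers' lists (the internal 'stats' proxy is excluded)."""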
+ def is_backend(backend):
+ return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats'
+
+ def is_frontend(frontend):
+ return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats'
+
+ def is_server(server):
+ return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND'))
+
+ if not data:
+ return None
+
+ result = defaultdict(list)
+ for elem in data:
+ if is_backend(elem):
+ result['backend'].append(elem)
+ continue
+ elif is_frontend(elem):
+ result['frontend'].append(elem)
+ continue
+ elif is_server(elem):
+ result['servers'].append(elem)
+
+ return result or None
+
+
+def server_status(server, backend_name, status='DOWN'):
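+    """Return True if the server row belongs to `backend_name` and the first word of its status equals `status`."""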
+ return server.get('# pxname') == backend_name and server.get('status').partition(' ')[0] == status
+
+
+def url_remove_params(url):
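+    """Return the URL with path parameters, query string and fragment stripped (scheme://netloc/path)."""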
+ parsed = urlparse(url or str())
+ return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path)
diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
new file mode 100644
index 00000000..10a0df3c
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/haproxy.conf
@@ -0,0 +1,83 @@
+# netdata python.d.plugin configuration for haproxy
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, haproxy also supports the following:
+#
+# IMPORTANT: socket MUST BE readable AND writable by netdata user
+#
+# socket: 'path/to/haproxy/sock'
+#
+# OR
+# url: 'http://<ip.address>:<port>/<url>;csv;norefresh'
+# [user: USERNAME] only if stats auth is used
+# [pass: PASSWORD] only if stats auth is used
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+#via_url:
+# user : 'admin'
+# pass : 'password'
+# url : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
+
+#via_socket:
+# socket: '/var/run/haproxy/admin.sock'
diff --git a/collectors/python.d.plugin/haproxy/metadata.yaml b/collectors/python.d.plugin/haproxy/metadata.yaml
new file mode 100644
index 00000000..82ab37d2
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/metadata.yaml
@@ -0,0 +1,322 @@
+# This collector will not appear in documentation, as the go version is preferred,
+# https://github.com/netdata/go.d.plugin/blob/master/modules/haproxy/README.md
+#
+#
+# meta:
+# plugin_name: python.d.plugin
+# module_name: haproxy
+# monitored_instance:
+# name: HAProxy
+# link: 'https://www.haproxy.org/'
+# categories:
+# - data-collection.web-servers-and-web-proxies
+# icon_filename: 'haproxy.png'
+# related_resources:
+# integrations:
+# list: []
+# info_provided_to_referring_integrations:
+# description: ''
+# keywords:
+# - haproxy
+# - tcp
+# - balancer
+# most_popular: false
+# overview:
+# data_collection:
+# metrics_description: 'This collector monitors HAProxy metrics about frontend servers, backend servers, responses and more.'
+# method_description: 'It connects to the HAProxy instance via URL or UNIX socket.'
+# supported_platforms:
+# include: []
+# exclude: []
+# multi_instance: true
+# additional_permissions:
+# description: ''
+# default_behavior:
+# auto_detection:
+# description: ''
+# limits:
+# description: ''
+# performance_impact:
+# description: ''
+# setup:
+# prerequisites:
+# list:
+# - title: 'HAProxy setup for socket'
+# description: 'Socket must be readable and writable by the netdata user.'
+# - title: 'HAProxy setup for URL'
+# description: 'URL must have `stats uri <path>` present in the haproxy config, otherwise you will get HTTP 503 in the haproxy logs.'
+# configuration:
+# file:
+# name: python.d/haproxy.conf
+# options:
+# description: |
+# There are 2 sections:
+
+# * Global variables
+# * One or more JOBS that can define multiple different instances to monitor.
+
+# The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+# Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+# Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+# folding:
+# title: "Config options"
+# enabled: true
+# list:
+# - name: update_every
+# description: Sets the default data collection frequency.
+# default_value: 5
+# required: false
+# - name: priority
+# description: Controls the order of charts at the netdata dashboard.
+# default_value: 60000
+# required: false
+# - name: autodetection_retry
+# description: Sets the job re-check interval in seconds.
+# default_value: 0
+# required: false
+# - name: penalty
+# description: Indicates whether to apply penalty to update_every in case of failures.
+# default_value: yes
+# required: false
+# - name: name
+# description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+# default_value: ''
+# required: false
+# - name: user
+# description: Username if stats auth is used.
+# default_value: ''
+# required: false
+# - name: pass
+# description: Password if stats auth is used.
+# default_value: ''
+# required: false
+# - name: url
+# description: URL to the haproxy_stats endpoint. Also make sure the parameters `csv` and `norefresh` are provided.
+# default_value: ''
+# required: false
+# - name: socket
+# description: Unix socket path to the haproxy sock file.
+# default_value: ''
+# required: false
+# examples:
+# folding:
+# enabled: true
+# title: "Config"
+# list:
+# - name: URL method
+# description: Use a URL to specify the endpoint to check for haproxy statistics.
+# config: |
+# via_url:
+# user: 'username' # ONLY IF stats auth is used
+#            pass: 'password' # ONLY IF stats auth is used
+# url: 'http://ip.address:port/url;csv;norefresh'
+# - name: Local socket
+# description: Use a local socket to check for haproxy statistics.
+# config: |
+# via_socket:
+# socket: 'path/to/haproxy/sock'
+# troubleshooting:
+# problems:
+# list: []
+# alerts:
+# - name: haproxy_backend_server_status
+# link: https://github.com/netdata/netdata/blob/master/health/health.d/haproxy.conf
+# metric: haproxy_hs.down
+# info: average number of failed haproxy backend servers over the last 10 seconds
+# - name: haproxy_backend_status
+# link: https://github.com/netdata/netdata/blob/master/health/health.d/haproxy.conf
+# metric: haproxy_hb.down
+# info: average number of failed haproxy backends over the last 10 seconds
+# metrics:
+# folding:
+# title: Metrics
+# enabled: false
+# description: ""
+# availability: []
+# scopes:
+# - name: global
+# description: 'These metrics refer to the entire monitored application.'
+# labels: []
+# metrics:
+# - name: haproxy_f.bin
+# description: Kilobytes In
+# unit: "KiB/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.bout
+# description: Kilobytes Out
+# unit: "KiB/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.scur
+# description: Sessions Active
+# unit: "sessions"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.qcur
+# description: Session In Queue
+# unit: "sessions"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.hrsp_1xx
+# description: HTTP responses with 1xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.hrsp_2xx
+# description: HTTP responses with 2xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.hrsp_3xx
+# description: HTTP responses with 3xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.hrsp_4xx
+# description: HTTP responses with 4xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.hrsp_5xx
+# description: HTTP responses with 5xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.hrsp_other
+# description: HTTP responses with other codes (protocol error)
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_f.hrsp_total
+# description: HTTP responses
+# unit: "responses"
+# chart_type: line
+# dimensions:
+# - name: a dimension per frontend server
+# - name: haproxy_b.bin
+# description: Kilobytes In
+# unit: "KiB/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.bout
+# description: Kilobytes Out
+# unit: "KiB/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.scur
+# description: Sessions Active
+# unit: "sessions"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.qcur
+# description: Sessions In Queue
+# unit: "sessions"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.hrsp_1xx
+# description: HTTP responses with 1xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.hrsp_2xx
+# description: HTTP responses with 2xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.hrsp_3xx
+# description: HTTP responses with 3xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.hrsp_4xx
+# description: HTTP responses with 4xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.hrsp_5xx
+# description: HTTP responses with 5xx code
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.hrsp_other
+# description: HTTP responses with other codes (protocol error)
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.hrsp_total
+# description: HTTP responses (total)
+# unit: "responses/s"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.qtime
+# description: The average queue time over the 1024 last requests
+# unit: "milliseconds"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.ctime
+# description: The average connect time over the 1024 last requests
+# unit: "milliseconds"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.rtime
+# description: The average response time over the 1024 last requests
+# unit: "milliseconds"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_b.ttime
+# description: The average total session time over the 1024 last requests
+# unit: "milliseconds"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_hs.down
+# description: Backend Servers In DOWN State
+# unit: "failed servers"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_hs.up
+# description: Backend Servers In UP State
+# unit: "health servers"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy_hb.down
+# description: Is Backend Failed?
+# unit: "boolean"
+# chart_type: line
+# dimensions:
+# - name: a dimension per backend server
+# - name: haproxy.idle
+# description: The Ratio Of Polling Time Vs Total Time
+# unit: "percentage"
+# chart_type: line
+# dimensions:
+# - name: idle
diff --git a/collectors/python.d.plugin/hddtemp/Makefile.inc b/collectors/python.d.plugin/hddtemp/Makefile.inc
new file mode 100644
index 00000000..22852b64
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += hddtemp/hddtemp.chart.py
+dist_pythonconfig_DATA += hddtemp/hddtemp.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
+
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
new file mode 120000
index 00000000..95c7593f
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -0,0 +1 @@
+integrations/hdd_temperature.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
new file mode 100644
index 00000000..6427aa18
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+# Description: hddtemp netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+ORDER = [
+ 'temperatures',
+]
+
+CHARTS = {
+ 'temperatures': {
+ 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'],
+ 'lines': [
+ # lines are created dynamically in `check()` method
+ ]}}
+
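+# Matches records in the hddtemp daemon output, e.g. |/dev/sda|ST2000DM001-1ER164|34|C|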
+RE = re.compile(r'\/dev\/([^|]+)\|([^|]+)\|([0-9]+|SLP|UNK)\|')
+
+
+class Disk:
+ def __init__(self, id_, name, temp):
+ self.id = id_.split('/')[-1]
+ self.name = name.replace(' ', '_')
+ self.temp = temp if temp.isdigit() else None
+
+ def __repr__(self):
+ return self.id
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.do_only = self.configuration.get('devices')
+ self._keep_alive = False
+ self.request = ""
+ self.host = "127.0.0.1"
+ self.port = 7634
+
+ def get_disks(self):
+ r = self._get_raw_data()
+
+ if not r:
+ return None
+
+ m = RE.findall(r)
+
+ if not m:
+ self.error("received data doesn't have needed records")
+ return None
+
+ rv = [Disk(*d) for d in m]
+ self.debug('available disks: {0}'.format(rv))
+
+ if self.do_only:
+ return [v for v in rv if v.id in self.do_only]
+ return rv
+
+ def get_data(self):
+ """
+ Get data from TCP/IP socket
+ :return: dict
+ """
+
+ disks = self.get_disks()
+
+ if not disks:
+ return None
+
+ return dict((d.id, d.temp) for d in disks)
+
+ def check(self):
+ """
+ Parse configuration, check if hddtemp is available, and dynamically create chart lines data
+ :return: boolean
+ """
+ self._parse_config()
+ disks = self.get_disks()
+
+ if not disks:
+ return False
+
+ for d in disks:
+ dim = [d.id]
+ self.definitions['temperatures']['lines'].append(dim)
+
+ return True
+
+ @staticmethod
+ def _check_raw_data(data):
+ return not bool(data)
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf
new file mode 100644
index 00000000..b2d7aef6
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf
@@ -0,0 +1,95 @@
+# netdata python.d.plugin configuration for hddtemp
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, hddtemp also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+
+# By default this module will try to autodetect disks
+# (autodetection works only for disks whose names start with "sd").
+# However, this can be overridden by setting the `devices` variable to
+# an array of desired disks. Example for two disks:
+#
+# devices:
+# - sda
+# - sdb
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 7634
+
+localipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+
+localipv6:
+ name: 'local'
+ host: '::1'
+ port: 7634
diff --git a/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md b/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md
new file mode 100644
index 00000000..4a1504f0
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md
@@ -0,0 +1,217 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hddtemp/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hddtemp/metadata.yaml"
+sidebar_label: "HDD temperature"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HDD temperature
+
+
+<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: hddtemp
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors disk temperatures.
+
+
+It uses the `hddtemp` daemon to gather the metrics.
+
+
+This collector is only supported on the following platforms:
+
+- Linux
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per HDD temperature instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hddtemp.temperatures | a dimension per disk | Celsius |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Run `hddtemp` in daemon mode
+
+You can execute `hddtemp` in TCP/IP daemon mode by using the `-d` argument.
+
+So running `hddtemp -d` would run the daemon, by default on port 7634.
+
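+To quickly verify that the daemon is answering before enabling the collector, a short standalone check could look
+like the following sketch (host and port are the hddtemp defaults):
+
+```python
+import socket
+
+HOST, PORT = '127.0.0.1', 7634  # hddtemp daemon defaults
+
+with socket.create_connection((HOST, PORT), timeout=5) as sock:
+    data = b''
+    while True:
+        chunk = sock.recv(4096)
+        if not chunk:
+            break
+        data += chunk
+
+# typical output looks like: |/dev/sda|ST2000DM001-1ER164|34|C|
+print(data.decode(errors='replace'))
+```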
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/hddtemp.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/hddtemp.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+By default this collector will try to autodetect disks (autodetection works only for disks whose names start with "sd"). However, this can be overridden by setting the `devices` option to an array of desired disks.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
+| devices | Array of desired disks to detect, in case their name doesn't start with `sd`. | | no |
+| host | The IP or HOSTNAME to connect to. | localhost | yes |
+| port | The port to connect to. | 7634 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+
+```
+##### Custom disk names
+
+An example defining the disk names to detect.
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+ devices:
+ - customdisk1
+ - customdisk2
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+
+remote_job:
+  name: 'remote'
+  host: '192.0.2.1'
+  port: 7634
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `hddtemp` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin hddtemp debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/hddtemp/metadata.yaml b/collectors/python.d.plugin/hddtemp/metadata.yaml
new file mode 100644
index 00000000..d8b56fc6
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/metadata.yaml
@@ -0,0 +1,163 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: hddtemp
+ monitored_instance:
+ name: HDD temperature
+ link: https://linux.die.net/man/8/hddtemp
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ icon_filename: "hard-drive.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - hardware
+ - hdd temperature
+ - disk temperature
+ - temperature
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors disk temperatures.
+ method_description: |
+ It uses the `hddtemp` daemon to gather the metrics.
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: By default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Run `hddtemp` in daemon mode
+ description: |
+ You can execute `hddtemp` in TCP/IP daemon mode by using the `-d` argument.
+
+ So running `hddtemp -d` would run the daemon, by default on port 7634.
+ configuration:
+ file:
+ name: "python.d/hddtemp.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+ By default this collector will try to autodetect disks (autodetection works only for disks whose names start with "sd"). However, this can be overridden by setting the `devices` option to an array of desired disks.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 1
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: >
+ Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: "local"
+ required: false
+ - name: devices
+ description: Array of desired disks to detect, in case their name doesn't start with `sd`.
+ default_value: ""
+ required: false
+ - name: host
+ description: The IP or HOSTNAME to connect to.
+ default_value: "localhost"
+ required: true
+ - name: port
+ description: The port to connect to.
+ default_value: 7634
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+ - name: Custom disk names
+ description: An example defining the disk names to detect.
+ config: |
+ localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+ devices:
+ - customdisk1
+ - customdisk2
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+
+ remote_job:
+   name: 'remote'
+   host: '192.0.2.1'
+   port: 7634
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: hddtemp.temperatures
+ description: Disk Temperatures
+ unit: "Celsius"
+ chart_type: line
+ dimensions:
+ - name: a dimension per disk
diff --git a/collectors/python.d.plugin/hpssa/Makefile.inc b/collectors/python.d.plugin/hpssa/Makefile.inc
new file mode 100644
index 00000000..1c04aa49
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += hpssa/hpssa.chart.py
+dist_pythonconfig_DATA += hpssa/hpssa.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += hpssa/README.md hpssa/Makefile.inc
+
diff --git a/collectors/python.d.plugin/hpssa/README.md b/collectors/python.d.plugin/hpssa/README.md
new file mode 120000
index 00000000..82802d8b
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/README.md
@@ -0,0 +1 @@
+integrations/hp_smart_storage_arrays.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/hpssa/hpssa.chart.py b/collectors/python.d.plugin/hpssa/hpssa.chart.py
new file mode 100644
index 00000000..66be0083
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/hpssa.chart.py
@@ -0,0 +1,396 @@
+# -*- coding: utf-8 -*-
+# Description: hpssa netdata python.d module
+# Author: Peter Gnodde (gnoddep)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from copy import deepcopy
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+update_every = 5
+
+ORDER = [
+ 'ctrl_status',
+ 'ctrl_temperature',
+ 'ld_status',
+ 'pd_status',
+ 'pd_temperature',
+]
+
+CHARTS = {
+ 'ctrl_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Controller',
+ 'hpssa.ctrl_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'ctrl_temperature': {
+ 'options': [
+ None,
+ 'Temperature',
+ 'Celsius',
+ 'Controller',
+ 'hpssa.ctrl_temperature',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'ld_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Logical drives',
+ 'hpssa.ld_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'pd_status': {
+ 'options': [
+ None,
+ 'Status 1 is OK, Status 0 is not OK',
+ 'Status',
+ 'Physical drives',
+ 'hpssa.pd_status',
+ 'line'
+ ],
+ 'lines': []
+ },
+ 'pd_temperature': {
+ 'options': [
+ None,
+ 'Temperature',
+ 'Celsius',
+ 'Physical drives',
+ 'hpssa.pd_temperature',
+ 'line'
+ ],
+ 'lines': []
+ }
+}
+
+adapter_regex = re.compile(r'^(?P<adapter_type>.+) in Slot (?P<slot>\d+)')
+ignored_sections_regex = re.compile(
+ r'''
+ ^
+ Physical[ ]Drives
+ | None[ ]attached
+ | (?:Expander|Enclosure|SEP|Port[ ]Name:)[ ].+
+ | .+[ ]at[ ]Port[ ]\S+,[ ]Box[ ]\d+,[ ].+
+ | Mirror[ ]Group[ ]\d+:
+ $
+ ''',
+ re.X
+)
+mirror_group_regex = re.compile(r'^Mirror Group \d+:$')
+disk_partition_regex = re.compile(r'^Disk Partition Information$')
+array_regex = re.compile(r'^Array: (?P<id>[A-Z]+)$')
+drive_regex = re.compile(
+ r'''
+ ^
+ Logical[ ]Drive:[ ](?P<logical_drive_id>\d+)
+ | physicaldrive[ ](?P<fqn>[^:]+:\d+:\d+)
+ $
+ ''',
+ re.X
+)
+key_value_regex = re.compile(r'^(?P<key>[^:]+): ?(?P<value>.*)$')
+ld_status_regex = re.compile(r'^Status: (?P<status>[^,]+)(?:, (?P<percentage>[0-9.]+)% complete)?$')
+error_match = re.compile(r'Error:')
+
+
+class HPSSAException(Exception):
+ pass
+
+
+class HPSSA(object):
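+    """
+    Parse the output of `ssacli ctrl all show config detail` into a list of adapter
+    dicts (controller, cache and battery status plus logical and physical drives).
+    """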
+ def __init__(self, lines):
+ self.lines = [line.strip() for line in lines if line.strip()]
+ self.current_line = 0
+ self.adapters = []
+ self.parse()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.current_line == len(self.lines):
+ raise StopIteration
+
+ line = self.lines[self.current_line]
+ self.current_line += 1
+
+ return line
+
+ def next(self):
+ """
+ This is for Python 2.7 compatibility
+ """
+ return self.__next__()
+
+ def rewind(self):
+ self.current_line = max(self.current_line - 1, 0)
+
+ @staticmethod
+ def match_any(line, *regexes):
+ return any([regex.match(line) for regex in regexes])
+
+ def parse(self):
+ for line in self:
+ match = adapter_regex.match(line)
+ if match:
+ self.adapters.append(self.parse_adapter(**match.groupdict()))
+
+ def parse_adapter(self, slot, adapter_type):
+ adapter = {
+ 'slot': int(slot),
+ 'type': adapter_type,
+
+ 'controller': {
+ 'status': None,
+ 'temperature': None,
+ },
+ 'cache': {
+ 'present': False,
+ 'status': None,
+ 'temperature': None,
+ },
+ 'battery': {
+ 'status': None,
+ 'count': 0,
+ },
+
+ 'logical_drives': [],
+ 'physical_drives': [],
+ }
+
+ for line in self:
+ if error_match.match(line):
+ raise HPSSAException('Error: {}'.format(line))
+ elif adapter_regex.match(line):
+ self.rewind()
+ break
+ elif array_regex.match(line):
+ self.parse_array(adapter)
+ elif line in ('Unassigned', 'unassigned') or line == 'HBA Drives':
+ self.parse_physical_drives(adapter)
+ elif ignored_sections_regex.match(line):
+ self.parse_ignored_section()
+ else:
+ match = key_value_regex.match(line)
+ if match:
+ key, value = match.group('key', 'value')
+ if key == 'Controller Status':
+ adapter['controller']['status'] = value == 'OK'
+ elif key == 'Controller Temperature (C)':
+ adapter['controller']['temperature'] = int(value)
+ elif key == 'Cache Board Present':
+ adapter['cache']['present'] = value == 'True'
+ elif key == 'Cache Status':
+ adapter['cache']['status'] = value == 'OK'
+ elif key == 'Cache Module Temperature (C)':
+ adapter['cache']['temperature'] = int(value)
+ elif key == 'Battery/Capacitor Count':
+ adapter['battery']['count'] = int(value)
+ elif key == 'Battery/Capacitor Status':
+ adapter['battery']['status'] = value == 'OK'
+ else:
+ raise HPSSAException('Cannot parse line: {}'.format(line))
+
+ return adapter
+
+ def parse_array(self, adapter):
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, ignored_sections_regex):
+ self.rewind()
+ break
+
+ match = drive_regex.match(line)
+ if match:
+ data = match.groupdict()
+ if data['logical_drive_id']:
+ self.parse_logical_drive(adapter, int(data['logical_drive_id']))
+ else:
+ self.parse_physical_drive(adapter, data['fqn'])
+ elif not key_value_regex.match(line):
+ self.rewind()
+ break
+
+ def parse_physical_drives(self, adapter):
+ for line in self:
+ match = drive_regex.match(line)
+ if match:
+ self.parse_physical_drive(adapter, match.group('fqn'))
+ else:
+ self.rewind()
+ break
+
+ def parse_logical_drive(self, adapter, logical_drive_id):
+ ld = {
+ 'id': logical_drive_id,
+ 'status': None,
+ 'status_complete': None,
+ }
+
+ for line in self:
+ if HPSSA.match_any(line, mirror_group_regex, disk_partition_regex):
+ self.parse_ignored_section()
+ continue
+
+ match = ld_status_regex.match(line)
+ if match:
+ ld['status'] = match.group('status') == 'OK'
+
+ if match.group('percentage'):
+ ld['status_complete'] = float(match.group('percentage')) / 100
+ elif HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
+ or not key_value_regex.match(line):
+ self.rewind()
+ break
+
+ adapter['logical_drives'].append(ld)
+
+ def parse_physical_drive(self, adapter, fqn):
+ pd = {
+ 'fqn': fqn,
+ 'status': None,
+ 'temperature': None,
+ }
+
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex):
+ self.rewind()
+ break
+
+ match = key_value_regex.match(line)
+ if match:
+ key, value = match.group('key', 'value')
+ if key == 'Status':
+ pd['status'] = value == 'OK'
+ elif key == 'Current Temperature (C)':
+ pd['temperature'] = int(value)
+ else:
+ self.rewind()
+ break
+
+ adapter['physical_drives'].append(pd)
+
+ def parse_ignored_section(self):
+ for line in self:
+ if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
+ or not key_value_regex.match(line):
+ self.rewind()
+ break
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.ssacli_path = self.configuration.get('ssacli_path', 'ssacli')
+ self.use_sudo = self.configuration.get('use_sudo', True)
+ self.cmd = []
+
+ def get_adapters(self):
+ try:
+ adapters = HPSSA(self._get_raw_data(command=self.cmd)).adapters
+ if not adapters:
+ # If no adapters are returned, run the command again but capture stderr
+ err = self._get_raw_data(command=self.cmd, stderr=True)
+ if err:
+ raise HPSSAException('Error executing cmd {}: {}'.format(' '.join(self.cmd), '\n'.join(err)))
+ return adapters
+ except HPSSAException as ex:
+ self.error(ex)
+ return []
+
+ def check(self):
+ if not os.path.isfile(self.ssacli_path):
+ ssacli_path = find_binary(self.ssacli_path)
+ if ssacli_path:
+ self.ssacli_path = ssacli_path
+ else:
+ self.error('Cannot locate "{}" binary'.format(self.ssacli_path))
+ return False
+
+ if self.use_sudo:
+ sudo = find_binary('sudo')
+ if not sudo:
+ self.error('Cannot locate "{}" binary'.format('sudo'))
+ return False
+
+ allowed = self._get_raw_data(command=[sudo, '-n', '-l', self.ssacli_path])
+ if not allowed or allowed[0].strip() != os.path.realpath(self.ssacli_path):
+ self.error('Not allowed to run sudo for command {}'.format(self.ssacli_path))
+ return False
+
+ self.cmd = [sudo, '-n']
+
+ self.cmd.extend([self.ssacli_path, 'ctrl', 'all', 'show', 'config', 'detail'])
+ self.info('Command: {}'.format(self.cmd))
+
+ adapters = self.get_adapters()
+
+ self.info('Discovered adapters: {}'.format([adapter['type'] for adapter in adapters]))
+ if not adapters:
+ self.error('No adapters discovered')
+ return False
+
+ return True
+
+ def get_data(self):
+ netdata = {}
+
+ for adapter in self.get_adapters():
+ status_key = '{}_status'.format(adapter['slot'])
+ temperature_key = '{}_temperature'.format(adapter['slot'])
+ ld_key = 'ld_{}_'.format(adapter['slot'])
+
+ data = {
+ 'ctrl_status': {
+ 'ctrl_' + status_key: adapter['controller']['status'],
+ 'cache_' + status_key: adapter['cache']['present'] and adapter['cache']['status'],
+ 'battery_' + status_key:
+ adapter['battery']['status'] if adapter['battery']['count'] > 0 else None
+ },
+
+ 'ctrl_temperature': {
+ 'ctrl_' + temperature_key: adapter['controller']['temperature'],
+ 'cache_' + temperature_key: adapter['cache']['temperature'],
+ },
+
+ 'ld_status': {
+ ld_key + '{}_status'.format(ld['id']): ld['status'] for ld in adapter['logical_drives']
+ },
+
+ 'pd_status': {},
+ 'pd_temperature': {},
+ }
+
+ for pd in adapter['physical_drives']:
+ pd_key = 'pd_{}_{}'.format(adapter['slot'], pd['fqn'])
+ data['pd_status'][pd_key + '_status'] = pd['status']
+ data['pd_temperature'][pd_key + '_temperature'] = pd['temperature']
+
+ for chart, dimension_data in data.items():
+ for dimension_id, value in dimension_data.items():
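+                    # Values that could not be collected are None and are skipped; new
+                    # dimensions are added to the chart the first time they are seen.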
+ if value is None:
+ continue
+
+ if dimension_id not in self.charts[chart]:
+ self.charts[chart].add_dimension([dimension_id])
+
+ netdata[dimension_id] = value
+
+ return netdata
diff --git a/collectors/python.d.plugin/hpssa/hpssa.conf b/collectors/python.d.plugin/hpssa/hpssa.conf
new file mode 100644
index 00000000..cc50c983
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/hpssa.conf
@@ -0,0 +1,61 @@
+# netdata python.d.plugin configuration for hpssa
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 5 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, hpssa also supports the following:
+#
+# ssacli_path: /usr/sbin/ssacli # The path to the ssacli executable
+# use_sudo: True # Whether to use sudo or not
+# ----------------------------------------------------------------------
+
+# ssacli_path: /usr/sbin/ssacli
+# use_sudo: True
diff --git a/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md b/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md
new file mode 100644
index 00000000..d46cc906
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md
@@ -0,0 +1,205 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hpssa/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hpssa/metadata.yaml"
+sidebar_label: "HP Smart Storage Arrays"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HP Smart Storage Arrays
+
+
+<img src="https://netdata.cloud/img/hp.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: hpssa
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors HP Smart Storage Arrays metrics about operational statuses and temperatures.
+
+It uses the command line tool `ssacli`. The exact command used is `sudo -n ssacli ctrl all show config detail`
+
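+To see the raw report the collector parses, you can run the same command yourself. A minimal sketch, assuming `ssacli` is installed and passwordless `sudo` is configured (see the Setup section below):
+
+```python
+# Run the command the collector uses and print the controller report.
+import subprocess
+
+cmd = ['sudo', '-n', 'ssacli', 'ctrl', 'all', 'show', 'config', 'detail']
+result = subprocess.run(cmd, capture_output=True, text=True)
+print(result.stdout or result.stderr)
+```
+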
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is provided, the collector will try to execute the `ssacli` binary.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per HP Smart Storage Arrays instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hpssa.ctrl_status | ctrl_{adapter slot}_status, cache_{adapter slot}_status, battery_{adapter slot}_status per adapter | Status |
+| hpssa.ctrl_temperature | ctrl_{adapter slot}_temperature, cache_{adapter slot}_temperature per adapter | Celsius |
+| hpssa.ld_status | a dimension per logical drive | Status |
+| hpssa.pd_status | a dimension per physical drive | Status |
+| hpssa.pd_temperature | a dimension per physical drive | Celsius |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable the hpssa collector
+
+The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+
+Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
+
+
+#### Allow user netdata to execute `ssacli` as root.
+
+This module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password.
+
+- Add to your `/etc/sudoers` file:
+
+`which ssacli` shows the full path to the binary.
+
+```bash
+netdata ALL=(root) NOPASSWD: /path/to/ssacli
+```
+
+- Reset Netdata's systemd
+ unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux
+ distributions with systemd)
+
+The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not ideal, but it is the next-best option when `ssacli` cannot otherwise be executed via `sudo`.
+
+As the `root` user, do the following:
+
+```bash
+mkdir /etc/systemd/system/netdata.service.d
+echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+systemctl daemon-reload
+systemctl restart netdata.service
+```
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/hpssa.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/hpssa.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| ssacli_path | Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH | | no |
+| use_sudo | Whether or not to use `sudo` to execute `ssacli` | True | no |
+
+</details>
+
+#### Examples
+
+##### Local simple config
+
+A basic configuration, specifying the path to `ssacli`
+
+```yaml
+local:
+ ssacli_path: /usr/sbin/ssacli
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `hpssa` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin hpssa debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/hpssa/metadata.yaml b/collectors/python.d.plugin/hpssa/metadata.yaml
new file mode 100644
index 00000000..7871cc27
--- /dev/null
+++ b/collectors/python.d.plugin/hpssa/metadata.yaml
@@ -0,0 +1,185 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: hpssa
+ monitored_instance:
+ name: HP Smart Storage Arrays
+ link: 'https://buy.hpe.com/us/en/software/server-management-software/server-management-software/smart-array-management-software/hpe-smart-storage-administrator/p/5409020'
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: 'hp.svg'
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ''
+ keywords:
+ - storage
+ - hp
+ - hpssa
+ - array
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: 'This collector monitors HP Smart Storage Arrays metrics about operational statuses and temperatures.'
+ method_description: 'It uses the command line tool `ssacli`. The exact command used is `sudo -n ssacli ctrl all show config detail`'
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ''
+ default_behavior:
+ auto_detection:
+ description: 'If no configuration is provided, the collector will try to execute the `ssacli` binary.'
+ limits:
+ description: ''
+ performance_impact:
+ description: ''
+ setup:
+ prerequisites:
+ list:
+ - title: 'Enable the hpssa collector'
+ description: |
+ The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+ ```bash
+ cd /etc/netdata # Replace this path with your Netdata config directory, if different
+ sudo ./edit-config python.d.conf
+ ```
+
+ Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
+ - title: 'Allow user netdata to execute `ssacli` as root.'
+ description: |
+ This module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password.
+
+ - Add to your `/etc/sudoers` file:
+
+ `which ssacli` shows the full path to the binary.
+
+ ```bash
+ netdata ALL=(root) NOPASSWD: /path/to/ssacli
+ ```
+
+ - Reset Netdata's systemd
+ unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux
+ distributions with systemd)
+
+              The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not ideal, but it is the next-best option when `ssacli` cannot otherwise be executed via `sudo`.
+
+ As the `root` user, do the following:
+
+              ```bash
+ mkdir /etc/systemd/system/netdata.service.d
+ echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+ systemctl daemon-reload
+ systemctl restart netdata.service
+ ```
+ configuration:
+ file:
+ name: python.d/hpssa.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ''
+ required: false
+ - name: ssacli_path
+ description: Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH
+ default_value: ''
+ required: false
+ - name: use_sudo
+ description: Whether or not to use `sudo` to execute `ssacli`
+ default_value: 'True'
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Local simple config
+            description: A basic configuration, specifying the path to `ssacli`
+ folding:
+ enabled: false
+ config: |
+ local:
+ ssacli_path: /usr/sbin/ssacli
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: hpssa.ctrl_status
+ description: Status 1 is OK, Status 0 is not OK
+ unit: "Status"
+ chart_type: line
+ dimensions:
+ - name: ctrl_{adapter slot}_status
+ - name: cache_{adapter slot}_status
+ - name: battery_{adapter slot}_status per adapter
+ - name: hpssa.ctrl_temperature
+ description: Temperature
+ unit: "Celsius"
+ chart_type: line
+ dimensions:
+ - name: ctrl_{adapter slot}_temperature
+ - name: cache_{adapter slot}_temperature per adapter
+ - name: hpssa.ld_status
+ description: Status 1 is OK, Status 0 is not OK
+ unit: "Status"
+ chart_type: line
+ dimensions:
+ - name: a dimension per logical drive
+ - name: hpssa.pd_status
+ description: Status 1 is OK, Status 0 is not OK
+ unit: "Status"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical drive
+ - name: hpssa.pd_temperature
+ description: Temperature
+ unit: "Celsius"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical drive
diff --git a/collectors/python.d.plugin/icecast/Makefile.inc b/collectors/python.d.plugin/icecast/Makefile.inc
new file mode 100644
index 00000000..cb7c6fa0
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += icecast/icecast.chart.py
+dist_pythonconfig_DATA += icecast/icecast.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += icecast/README.md icecast/Makefile.inc
+
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
new file mode 120000
index 00000000..db3c1b57
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -0,0 +1 @@
+integrations/icecast.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
new file mode 100644
index 00000000..a967d177
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Description: icecast netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'listeners',
+]
+
+CHARTS = {
+ 'listeners': {
+ 'options': [None, 'Number Of Listeners', 'listeners', 'listeners', 'icecast.listeners', 'line'],
+ 'lines': [
+ ]
+ }
+}
+
+
+class Source:
+ def __init__(self, idx, data):
+ self.name = 'source_{0}'.format(idx)
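+        # A source is considered active only when it reports both a stream start time and a server name.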
+ self.is_active = data.get('stream_start') and data.get('server_name')
+ self.listeners = data['listeners']
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url')
+ self._manager = self._build_manager()
+
+ def check(self):
+ """
+ Add active sources to the "listeners" chart
+ :return: bool
+ """
+ sources = self.get_sources()
+ if not sources:
+ return None
+
+ active_sources = 0
+ for idx, raw_source in enumerate(sources):
+ if Source(idx, raw_source).is_active:
+ active_sources += 1
+ dim_id = 'source_{0}'.format(idx)
+ dim = 'source {0}'.format(idx)
+ self.definitions['listeners']['lines'].append([dim_id, dim])
+
+ return bool(active_sources)
+
+ def _get_data(self):
+ """
+ Get number of listeners for every source
+ :return: dict
+ """
+ sources = self.get_sources()
+ if not sources:
+ return None
+
+ data = dict()
+
+ for idx, raw_source in enumerate(sources):
+ source = Source(idx, raw_source)
+ data[source.name] = source.listeners
+
+ return data
+
+ def get_sources(self):
+ """
+ Format data received from http request and return list of sources
+ :return: list
+ """
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ try:
+ data = json.loads(raw_data)
+ except ValueError as error:
+ self.error('JSON decode error:', error)
+ return None
+
+ sources = data['icestats'].get('source')
+ if not sources:
+ return None
+
+ return sources if isinstance(sources, list) else [sources]
diff --git a/collectors/python.d.plugin/icecast/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf
new file mode 100644
index 00000000..a33074ae
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/icecast.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for icecast
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, icecast also supports the following:
+#
+# url: 'URL' # the URL to fetch icecast's stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8443/status-json.xsl'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8443/status-json.xsl' \ No newline at end of file
diff --git a/collectors/python.d.plugin/icecast/integrations/icecast.md b/collectors/python.d.plugin/icecast/integrations/icecast.md
new file mode 100644
index 00000000..12d7d59e
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/integrations/icecast.md
@@ -0,0 +1,166 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/icecast/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/icecast/metadata.yaml"
+sidebar_label: "Icecast"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Media Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Icecast
+
+
+<img src="https://netdata.cloud/img/icecast.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: icecast
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Icecast listener counts.
+
+It connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.
+
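+The request can be reproduced with a few lines of Python; a minimal sketch, assuming a local Icecast server on the default status URL used by this collector:
+
+```python
+# Fetch the Icecast status page and print the listener count per source.
+import json
+import urllib.request
+
+with urllib.request.urlopen('http://localhost:8443/status-json.xsl') as resp:
+    stats = json.load(resp)
+
+# 'source' is a single dict for one mount point, or a list when there are several.
+sources = stats['icestats'].get('source') or []
+if not isinstance(sources, list):
+    sources = [sources]
+
+for idx, source in enumerate(sources):
+    print('source_{0}'.format(idx), source.get('listeners'))
+```
+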
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+Without configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Icecast instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| icecast.listeners | a dimension for each active source | listeners |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Icecast minimum version
+
+Requires Icecast version 2.4.0 or later.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/icecast.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/icecast.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| url | The URL (and port) to the icecast server. Needs to also include `/status-json.xsl` | http://localhost:8443/status-json.xsl | no |
+| user | Username to use to connect to `url` if it's password protected. | | no |
+| pass | Password to use to connect to `url` if it's password protected. | | no |
+
+</details>
+
+#### Examples
+
+##### Remote Icecast server
+
+Configure a remote icecast server
+
+```yaml
+remote:
+ url: 'http://1.2.3.4:8443/status-json.xsl'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `icecast` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin icecast debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/icecast/metadata.yaml b/collectors/python.d.plugin/icecast/metadata.yaml
new file mode 100644
index 00000000..4bcf5e39
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/metadata.yaml
@@ -0,0 +1,127 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: icecast
+ monitored_instance:
+ name: Icecast
+ link: 'https://icecast.org/'
+ categories:
+ - data-collection.media-streaming-servers
+ icon_filename: 'icecast.svg'
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ''
+ keywords:
+ - icecast
+ - streaming
+ - media
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: 'This collector monitors Icecast listener counts.'
+ method_description: 'It connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.'
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ''
+ default_behavior:
+ auto_detection:
+ description: 'Without configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl'
+ limits:
+ description: ''
+ performance_impact:
+ description: ''
+ setup:
+ prerequisites:
+ list:
+ - title: 'Icecast minimum version'
+            description: 'Requires Icecast version 2.4.0 or later.'
+ configuration:
+ file:
+ name: python.d/icecast.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ''
+ required: false
+ - name: url
+ description: The URL (and port) to the icecast server. Needs to also include `/status-json.xsl`
+ default_value: 'http://localhost:8443/status-json.xsl'
+ required: false
+ - name: user
+ description: Username to use to connect to `url` if it's password protected.
+ default_value: ''
+ required: false
+ - name: pass
+ description: Password to use to connect to `url` if it's password protected.
+ default_value: ''
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Remote Icecast server
+ description: Configure a remote icecast server
+ folding:
+ enabled: false
+ config: |
+ remote:
+ url: 'http://1.2.3.4:8443/status-json.xsl'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: icecast.listeners
+ description: Number Of Listeners
+ unit: "listeners"
+ chart_type: line
+ dimensions:
+ - name: a dimension for each active source
diff --git a/collectors/python.d.plugin/ipfs/Makefile.inc b/collectors/python.d.plugin/ipfs/Makefile.inc
new file mode 100644
index 00000000..68458cb3
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += ipfs/ipfs.chart.py
+dist_pythonconfig_DATA += ipfs/ipfs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += ipfs/README.md ipfs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
new file mode 120000
index 00000000..eee6a07b
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -0,0 +1 @@
+integrations/ipfs.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/ipfs/integrations/ipfs.md b/collectors/python.d.plugin/ipfs/integrations/ipfs.md
new file mode 100644
index 00000000..77dc745a
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/integrations/ipfs.md
@@ -0,0 +1,203 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/metadata.yaml"
+sidebar_label: "IPFS"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IPFS
+
+
+<img src="https://netdata.cloud/img/ipfs.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: ipfs
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors IPFS server metrics about its quality and performance.
+
+It connects to an http endpoint of the IPFS server to collect the metrics
+
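+For example, the bandwidth chart is fed from `/api/v0/stats/bw`. A minimal sketch of that call, assuming the default API address and the POST method the collector uses:
+
+```python
+# Query the IPFS API for current bandwidth rates (bytes per second).
+import json
+import urllib.request
+
+req = urllib.request.Request('http://localhost:5001/api/v0/stats/bw', method='POST')
+with urllib.request.urlopen(req) as resp:
+    bw = json.load(resp)
+
+print(bw['RateIn'], bw['RateOut'])
+```
+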
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If the endpoint is accessible by the Agent, netdata will autodetect it
+
+#### Limits
+
+Calls to the following endpoints are disabled due to IPFS bugs:
+
+- `/api/v0/stats/repo` (https://github.com/ipfs/go-ipfs/issues/3874)
+- `/api/v0/pin/ls` (https://github.com/ipfs/go-ipfs/issues/7528)
+
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per IPFS instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ipfs.bandwidth | in, out | kilobits/s |
+| ipfs.peers | peers | peers |
+| ipfs.repo_size | avail, size | GiB |
+| ipfs.repo_objects | objects, pinned, recursive_pins | objects |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/ipfs.conf) | ipfs.repo_size | IPFS datastore utilization |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/ipfs.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/ipfs.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |
+| url | URL to the IPFS API | no | yes |
+| repoapi | Collect repo metrics. | no | no |
+| pinapi | Set status of IPFS pinned object polling. | no | no |
+
+</details>
+
+#### Examples
+
+##### Basic (default out-of-the-box)
+
+A basic example configuration; only one job will run at a time. The autodetection mechanism uses it by default.
+
+```yaml
+localhost:
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
+
+remote_host:
+ name: 'remote'
+ url: 'http://192.0.2.1:5001'
+ repoapi: no
+ pinapi: no
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `ipfs` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin ipfs debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
new file mode 100644
index 00000000..abfc9c49
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+# Description: IPFS netdata python.d module
+# Authors: davidak
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'repo_size',
+ 'repo_objects',
+]
+
+CHARTS = {
+ 'bandwidth': {
+ 'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+ 'lines': [
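+            # the reported rates are bytes per second; multiplier 8 and divisor 1000 convert them to kilobits/s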
+ ['in', None, 'absolute', 8, 1000],
+ ['out', None, 'absolute', -8, 1000]
+ ]
+ },
+ 'peers': {
+ 'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
+ 'lines': [
+ ['peers', None, 'absolute']
+ ]
+ },
+ 'repo_size': {
+ 'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
+ 'lines': [
+ ['avail', None, 'absolute', 1, 1 << 30],
+ ['size', None, 'absolute', 1, 1 << 30],
+ ]
+ },
+ 'repo_objects': {
+ 'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
+ 'lines': [
+ ['objects', None, 'absolute', 1, 1],
+ ['pinned', None, 'absolute', 1, 1],
+ ['recursive_pins', None, 'absolute', 1, 1]
+ ]
+ }
+}
+
+SI_zeroes = {
+ 'k': 3,
+ 'm': 6,
+ 'g': 9,
+ 't': 12,
+ 'p': 15,
+ 'e': 18,
+ 'z': 21,
+ 'y': 24
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.baseurl = self.configuration.get('url', 'http://localhost:5001')
+ self.method = "POST"
+ self.do_pinapi = self.configuration.get('pinapi')
+ self.do_repoapi = self.configuration.get('repoapi')
+ self.__storage_max = None
+
+ def _get_json(self, sub_url):
+ """
+ :return: json decoding of the specified url
+ """
+ self.url = self.baseurl + sub_url
+ try:
+ return json.loads(self._get_raw_data())
+ except (TypeError, ValueError):
+ return dict()
+
+ @staticmethod
+ def _recursive_pins(keys):
+ return sum(1 for k in keys if keys[k]['Type'] == b'recursive')
+
+ @staticmethod
+ def _dehumanize(store_max):
+ # convert from '10Gb' to 10000000000
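+        # e.g. '10Gb' -> lower() -> '10gb' -> val '10', units 'g' -> '10' + 9 zeroes -> 10000000000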
+ if not isinstance(store_max, int):
+ store_max = store_max.lower()
+ if store_max.endswith('b'):
+ val, units = store_max[:-2], store_max[-2]
+ if units in SI_zeroes:
+ val += '0' * SI_zeroes[units]
+ store_max = val
+ try:
+ store_max = int(store_max)
+ except (TypeError, ValueError):
+ store_max = None
+ return store_max
+
+ def _storagemax(self, store_cfg):
+ if self.__storage_max is None:
+ self.__storage_max = self._dehumanize(store_cfg)
+ return self.__storage_max
+
+ def _get_data(self):
+ """
+ Get data from API
+ :return: dict
+ """
+ # suburl : List of (result-key, original-key, transform-func)
+ cfg = {
+ '/api/v0/stats/bw':
+ [
+ ('in', 'RateIn', int),
+ ('out', 'RateOut', int),
+ ],
+ '/api/v0/swarm/peers':
+ [
+ ('peers', 'Peers', len),
+ ],
+ }
+ if self.do_repoapi:
+ cfg.update({
+ '/api/v0/stats/repo':
+ [
+ ('size', 'RepoSize', int),
+ ('objects', 'NumObjects', int),
+ ('avail', 'StorageMax', self._storagemax),
+ ],
+ })
+
+ if self.do_pinapi:
+ cfg.update({
+ '/api/v0/pin/ls':
+ [
+ ('pinned', 'Keys', len),
+ ('recursive_pins', 'Keys', self._recursive_pins),
+ ]
+ })
+ r = dict()
+ for suburl in cfg:
+ in_json = self._get_json(suburl)
+ for new_key, orig_key, xmute in cfg[suburl]:
+ try:
+ r[new_key] = xmute(in_json[orig_key])
+ except Exception as error:
+ self.debug(error)
+ return r or None
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
new file mode 100644
index 00000000..8b167b39
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for ipfs
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, ipfs also supports the following:
+#
+# url: 'URL' # URL to the IPFS API
+# repoapi: no # Collect repo metrics
+# # Currently defaults to disabled due to IPFS Bug
+# # https://github.com/ipfs/go-ipfs/issues/7528
+# # resulting in very high CPU Usage
+# pinapi: no # Set status of IPFS pinned object polling
+# # Currently defaults to disabled due to IPFS Bug
+# # https://github.com/ipfs/go-ipfs/issues/3874
+# # resulting in very high CPU Usage
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
diff --git a/collectors/python.d.plugin/ipfs/metadata.yaml b/collectors/python.d.plugin/ipfs/metadata.yaml
new file mode 100644
index 00000000..dbc421c9
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/metadata.yaml
@@ -0,0 +1,172 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: ipfs
+ monitored_instance:
+ name: IPFS
+ link: "https://ipfs.tech/"
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: "ipfs.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords: []
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors IPFS server metrics about its quality and performance."
+ method_description: "It connects to an http endpoint of the IPFS server to collect the metrics"
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "If the endpoint is accessible by the Agent, netdata will autodetect it"
+ limits:
+ description: |
+ Calls to the following endpoints are disabled due to IPFS bugs:
+
+            - `/api/v0/stats/repo` (https://github.com/ipfs/go-ipfs/issues/3874)
+            - `/api/v0/pin/ls` (https://github.com/ipfs/go-ipfs/issues/7528)
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "python.d/ipfs.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: ""
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: The JOB's name as it will appear at the dashboard (by default is the job_name)
+ default_value: job_name
+ required: false
+ - name: url
+ description: URL to the IPFS API
+ default_value: no
+ required: true
+ - name: repoapi
+ description: Collect repo metrics.
+ default_value: no
+ required: false
+ - name: pinapi
+ description: Set status of IPFS pinned object polling.
+ default_value: no
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic (default out-of-the-box)
+              description: A basic example configuration; only one job will run at a time. The autodetection mechanism uses it by default.
+ folding:
+ enabled: false
+ config: |
+ localhost:
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ localhost:
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
+
+ remote_host:
+ name: 'remote'
+ url: 'http://192.0.2.1:5001'
+ repoapi: no
+ pinapi: no
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: ipfs_datastore_usage
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/ipfs.conf
+ metric: ipfs.repo_size
+ info: IPFS datastore utilization
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: ipfs.bandwidth
+ description: IPFS Bandwidth
+ unit: "kilobits/s"
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: out
+ - name: ipfs.peers
+ description: IPFS Peers
+ unit: "peers"
+ chart_type: line
+ dimensions:
+ - name: peers
+ - name: ipfs.repo_size
+ description: IPFS Repo Size
+ unit: "GiB"
+ chart_type: area
+ dimensions:
+ - name: avail
+ - name: size
+ - name: ipfs.repo_objects
+ description: IPFS Repo Objects
+ unit: "objects"
+ chart_type: line
+ dimensions:
+ - name: objects
+ - name: pinned
+ - name: recursive_pins
diff --git a/collectors/python.d.plugin/litespeed/Makefile.inc b/collectors/python.d.plugin/litespeed/Makefile.inc
new file mode 100644
index 00000000..5dd64502
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += litespeed/litespeed.chart.py
+dist_pythonconfig_DATA += litespeed/litespeed.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += litespeed/README.md litespeed/Makefile.inc
+
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
new file mode 120000
index 00000000..e7418b3d
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -0,0 +1 @@
+integrations/litespeed.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/litespeed/integrations/litespeed.md b/collectors/python.d.plugin/litespeed/integrations/litespeed.md
new file mode 100644
index 00000000..87f2d0b1
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/integrations/litespeed.md
@@ -0,0 +1,170 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/litespeed/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/litespeed/metadata.yaml"
+sidebar_label: "Litespeed"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Litespeed
+
+
+<img src="https://netdata.cloud/img/litespeed.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: litespeed
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Examine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery.
+
+The collector uses the statistics under /tmp/lshttpd to gather the metrics.
+
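+To verify that those report files exist on your system, a minimal sketch (the directory is the collector's default `path`; the report file names vary between LiteSpeed versions):
+
+```python
+# List the files LiteSpeed writes under the default real-time stats directory.
+import os
+
+path = '/tmp/lshttpd/'
+reports = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
+print(reports or 'no report files found under ' + path)
+```
+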
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Litespeed instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| litespeed.net_throughput | in, out | kilobits/s |
+| litespeed.net_throughput | in, out | kilobits/s |
+| litespeed.connections | free, used | conns |
+| litespeed.connections | free, used | conns |
+| litespeed.requests | requests | requests/s |
+| litespeed.requests_processing | processing | requests |
+| litespeed.cache | hits | hits/s |
+| litespeed.cache | hits | hits/s |
+| litespeed.static | hits | hits/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/litespeed.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/litespeed.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| path | Use a different path than the default, where the litespeed stats files reside. | /tmp/lshttpd/ | no |
+
+</details>
+
+#### Examples
+
+##### Set the path to statistics
+
+Change the path for the litespeed stats files
+
+```yaml
+localhost:
+ name: 'local'
+ path: '/tmp/lshttpd'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `litespeed` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin litespeed debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
new file mode 100644
index 00000000..7ef8189e
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+# Description: litespeed netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import glob
+import os
+import re
+from collections import namedtuple
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+update_every = 10
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'net_throughput_http', # net throughput
+ 'net_throughput_https', # net throughput
+ 'connections_http', # connections
+ 'connections_https', # connections
+ 'requests', # requests
+ 'requests_processing', # requests
+ 'pub_cache_hits', # cache
+ 'private_cache_hits', # cache
+ 'static_hits', # static
+]
+
+CHARTS = {
+ 'net_throughput_http': {
+ 'options': [None, 'Network Throughput HTTP', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['bps_in', 'in', 'absolute'],
+ ['bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'net_throughput_https': {
+ 'options': [None, 'Network Throughput HTTPS', 'kilobits/s', 'net throughput',
+ 'litespeed.net_throughput', 'area'],
+ 'lines': [
+ ['ssl_bps_in', 'in', 'absolute'],
+ ['ssl_bps_out', 'out', 'absolute', -1]
+ ]
+ },
+ 'connections_http': {
+ 'options': [None, 'Connections HTTP', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['conn_free', 'free', 'absolute'],
+ ['conn_used', 'used', 'absolute']
+ ]
+ },
+ 'connections_https': {
+ 'options': [None, 'Connections HTTPS', 'conns', 'connections', 'litespeed.connections', 'stacked'],
+ 'lines': [
+ ['ssl_conn_free', 'free', 'absolute'],
+ ['ssl_conn_used', 'used', 'absolute']
+ ]
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'litespeed.requests', 'line'],
+ 'lines': [
+ ['requests', None, 'absolute', 1, 100]
+ ]
+ },
+ 'requests_processing': {
+ 'options': [None, 'Requests In Processing', 'requests', 'requests', 'litespeed.requests_processing', 'line'],
+ 'lines': [
+ ['requests_processing', 'processing', 'absolute']
+ ]
+ },
+ 'pub_cache_hits': {
+ 'options': [None, 'Public Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['pub_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'private_cache_hits': {
+ 'options': [None, 'Private Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
+ 'lines': [
+ ['private_cache_hits', 'hits', 'absolute', 1, 100]
+ ]
+ },
+ 'static_hits': {
+ 'options': [None, 'Static Hits', 'hits/s', 'static', 'litespeed.static', 'line'],
+ 'lines': [
+ ['static_hits', 'hits', 'absolute', 1, 100]
+ ]
+ }
+}
+
+t = namedtuple('T', ['key', 'id', 'mul'])
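+# key: field name in the .rtreport files, id: chart dimension id, mul: multiplier applied to the raw value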
+
+T = [
+ t('BPS_IN', 'bps_in', 8),
+ t('BPS_OUT', 'bps_out', 8),
+ t('SSL_BPS_IN', 'ssl_bps_in', 8),
+ t('SSL_BPS_OUT', 'ssl_bps_out', 8),
+ t('REQ_PER_SEC', 'requests', 100),
+ t('REQ_PROCESSING', 'requests_processing', 1),
+ t('PUB_CACHE_HITS_PER_SEC', 'pub_cache_hits', 100),
+ t('PRIVATE_CACHE_HITS_PER_SEC', 'private_cache_hits', 100),
+ t('STATIC_HITS_PER_SEC', 'static_hits', 100),
+ t('PLAINCONN', 'conn_used', 1),
+ t('AVAILCONN', 'conn_free', 1),
+ t('SSLCONN', 'ssl_conn_used', 1),
+ t('AVAILSSL', 'ssl_conn_free', 1),
+]
+
+RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
+
+ZERO_DATA = {
+ 'bps_in': 0,
+ 'bps_out': 0,
+ 'ssl_bps_in': 0,
+ 'ssl_bps_out': 0,
+ 'requests': 0,
+ 'requests_processing': 0,
+ 'pub_cache_hits': 0,
+ 'private_cache_hits': 0,
+ 'static_hits': 0,
+ 'conn_used': 0,
+ 'conn_free': 0,
+ 'ssl_conn_used': 0,
+ 'ssl_conn_free': 0,
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.path = self.configuration.get('path', '/tmp/lshttpd/')
+ self.files = list()
+
+ def check(self):
+ if not self.path:
+ self.error('"path" not specified')
+ return False
+
+ fs = glob.glob(os.path.join(self.path, '.rtreport*'))
+
+ if not fs:
+ self.error('"{0}" has no "rtreport" files or dir is not readable'.format(self.path))
+ return None
+
+ self.debug('stats files:', fs)
+
+ for f in fs:
+ if not is_readable_file(f):
+ self.error('{0} is not readable'.format(f))
+ continue
+ self.files.append(f)
+
+ return bool(self.files)
+
+ def get_data(self):
+ """
+        Parse the .rtreport files and return the collected metrics
+ :return: dict
+ """
+ data = dict(ZERO_DATA)
+
+ for f in self.files:
+ try:
+ with open(f) as b:
+ lines = b.readlines()
+ except (OSError, IOError) as err:
+ self.error(err)
+ return None
+ else:
+ parse_file(data, lines)
+
+ return data
+
+
+def parse_file(data, lines):
+ for line in lines:
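+        # keep only the rtreport lines that start with the known counter prefixes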
+ if not line.startswith(('BPS_IN:', 'MAXCONN:', 'PLAINCONN:', 'REQ_RATE []:')):
+ continue
+ m = dict(RE.findall(line))
+ for v in T:
+ if v.key in m:
+ data[v.id] += float(m[v.key]) * v.mul
+
+
+def is_readable_file(v):
+ return os.path.isfile(v) and os.access(v, os.R_OK)
diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf
new file mode 100644
index 00000000..a326e184
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/litespeed.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for litespeed
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, litespeed also supports the following:
+#
+#  path: 'PATH'                    # path to litespeed stats files directory
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ path : '/tmp/lshttpd/'
diff --git a/collectors/python.d.plugin/litespeed/metadata.yaml b/collectors/python.d.plugin/litespeed/metadata.yaml
new file mode 100644
index 00000000..400f3a7f
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/metadata.yaml
@@ -0,0 +1,168 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: litespeed
+ monitored_instance:
+ name: Litespeed
+ link: "https://www.litespeedtech.com/products/litespeed-web-server"
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: "litespeed.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - litespeed
+ - web
+ - server
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Examine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery."
+ method_description: "The collector uses the statistics under /tmp/lshttpd to gather the metrics."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "If no configuration is present, the collector will attempt to read files under /tmp/lshttpd/."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: python.d/litespeed.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: path
+                description: Use a different path than the default, where the litespeed stats files reside.
+ default_value: "/tmp/lshttpd/"
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Set the path to statistics
+ description: Change the path for the litespeed stats files
+ config: |
+ localhost:
+ name: 'local'
+ path: '/tmp/lshttpd'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: litespeed.net_throughput
+ description: Network Throughput HTTP
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: litespeed.net_throughput
+ description: Network Throughput HTTPS
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: litespeed.connections
+ description: Connections HTTP
+ unit: "conns"
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: litespeed.connections
+ description: Connections HTTPS
+ unit: "conns"
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: litespeed.requests
+ description: Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: litespeed.requests_processing
+ description: Requests In Processing
+ unit: "requests"
+ chart_type: line
+ dimensions:
+ - name: processing
+ - name: litespeed.cache
+ description: Public Cache Hits
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: litespeed.cache
+ description: Private Cache Hits
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: litespeed.static
+ description: Static Hits
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hits
diff --git a/collectors/python.d.plugin/megacli/Makefile.inc b/collectors/python.d.plugin/megacli/Makefile.inc
new file mode 100644
index 00000000..83680d72
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += megacli/megacli.chart.py
+dist_pythonconfig_DATA += megacli/megacli.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += megacli/README.md megacli/Makefile.inc
+
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
new file mode 120000
index 00000000..e5df4d41
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -0,0 +1 @@
+integrations/megacli.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/megacli/integrations/megacli.md b/collectors/python.d.plugin/megacli/integrations/megacli.md
new file mode 100644
index 00000000..0c4af78a
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/integrations/megacli.md
@@ -0,0 +1,220 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/megacli/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/megacli/metadata.yaml"
+sidebar_label: "MegaCLI"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MegaCLI
+
+
+<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: megacli
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Examine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics.
+
+Collects adapter, physical drive, and battery stats using the `megacli` command-line tool (a sketch of this flow follows the command list below).
+
+Executed commands:
+
+ - `sudo -n megacli -LDPDInfo -aAll`
+ - `sudo -n megacli -AdpBbuCmd -a0`
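+
+As a rough, hypothetical sketch (not part of the collector), the snippet below runs the first of these commands and extracts the per-adapter state using the same regular expression as the module further down in this diff; it assumes `megacli` is installed and passwordless sudo is configured as described under Setup.
+
+```python
+import re
+import subprocess
+
+# Same pattern the collector uses for adapter state lines.
+RE_ADAPTER = re.compile(r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z ]+)')
+
+
+def adapter_states():
+    """Return {adapter_id: state} parsed from `sudo -n megacli -LDPDInfo -aAll`."""
+    out = subprocess.run(
+        ['sudo', '-n', 'megacli', '-LDPDInfo', '-aAll', '-NoLog'],
+        capture_output=True, text=True, check=True,
+    ).stdout
+    # The collector joins the relevant lines into one string before matching; do the same here.
+    flat = ' '.join(line.strip() for line in out.splitlines()
+                    if line.startswith(('Adapter #', 'State')))
+    return dict(RE_ADAPTER.findall(flat))
+
+
+if __name__ == '__main__':
+    print(adapter_states())
+```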
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.
+
+### Default Behavior
+
+#### Auto-Detection
+
+After all the permissions are satisfied, netdata should be able to execute commands via the megacli command-line utility.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per MegaCLI instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| megacli.adapter_degraded | a dimension per adapter | is degraded |
+| megacli.pd_media_error | a dimension per physical drive | errors/s |
+| megacli.pd_predictive_failure | a dimension per physical drive | failures/s |
+
+### Per battery
+
+Metrics related to Battery Backup Units; each BBU provides its own set of the following metrics.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| megacli.bbu_relative_charge | adapter {battery id} | percentage |
+| megacli.bbu_cycle_count | adapter {battery id} | cycle count |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ megacli_adapter_state ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.adapter_degraded | adapter is in the degraded state (0: false, 1: true) |
+| [ megacli_pd_media_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.pd_media_error | number of physical drive media errors |
+| [ megacli_pd_predictive_failures ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.pd_predictive_failure | number of physical drive predictive failures |
+| [ megacli_bbu_relative_charge ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.bbu_relative_charge | average battery backup unit (BBU) relative state of charge over the last 10 seconds |
+| [ megacli_bbu_cycle_count ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.bbu_cycle_count | average battery backup unit (BBU) charge cycles count over the last 10 seconds |
+
+
+## Setup
+
+### Prerequisites
+
+#### Grant permissions for netdata, to run megacli as sudoer
+
+The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.
+
+Add the following to your `/etc/sudoers` file (`which megacli` shows the full path to the binary):
+
+```bash
+netdata ALL=(root) NOPASSWD: /path/to/megacli
+```
+
+
+#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)
+
+The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute megacli using sudo.
+
+As root user, do the following:
+
+```bash
+mkdir /etc/systemd/system/netdata.service.d
+echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+systemctl daemon-reload
+systemctl restart netdata.service
+```
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/megacli.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/megacli.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| do_battery | Whether to also collect battery backup unit (BBU) statistics (adds an additional `megacli -AdpBbuCmd -a0` call). | no | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration per job
+
+```yaml
+job_name:
+ name: myname
+ update_every: 1
+ priority: 60000
+ penalty: yes
+ autodetection_retry: 0
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `megacli` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin megacli debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
new file mode 100644
index 00000000..8222092a
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -0,0 +1,278 @@
+# -*- coding: utf-8 -*-
+# Description: megacli netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+
+update_every = 5
+
+
+def adapter_charts(ads):
+ order = [
+ 'adapter_degraded',
+ ]
+
+ def dims(ad):
+ return [['adapter_{0}_degraded'.format(a.id), 'adapter {0}'.format(a.id)] for a in ad]
+
+ charts = {
+ 'adapter_degraded': {
+ 'options': [None, 'Adapter State', 'is degraded', 'adapter', 'megacli.adapter_degraded', 'line'],
+ 'lines': dims(ads)
+ },
+ }
+
+ return order, charts
+
+
+def pd_charts(pds):
+ order = [
+ 'pd_media_error',
+ 'pd_predictive_failure',
+ ]
+
+ def dims(k, pd):
+ return [['slot_{0}_{1}'.format(p.id, k), 'slot {0}'.format(p.id), 'incremental'] for p in pd]
+
+ charts = {
+ 'pd_media_error': {
+ 'options': [None, 'Physical Drives Media Errors', 'errors/s', 'pd', 'megacli.pd_media_error', 'line'],
+ 'lines': dims('media_error', pds)
+ },
+ 'pd_predictive_failure': {
+ 'options': [None, 'Physical Drives Predictive Failures', 'failures/s', 'pd',
+ 'megacli.pd_predictive_failure', 'line'],
+ 'lines': dims('predictive_failure', pds)
+ }
+ }
+
+ return order, charts
+
+
+def battery_charts(bats):
+ order = list()
+ charts = dict()
+
+ for b in bats:
+ order.append('bbu_{0}_relative_charge'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_relative_charge'.format(b.id): {
+ 'options': [None, 'Relative State of Charge', 'percentage', 'battery',
+ 'megacli.bbu_relative_charge', 'line'],
+ 'lines': [
+ ['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ for b in bats:
+ order.append('bbu_{0}_cycle_count'.format(b.id))
+ charts.update(
+ {
+ 'bbu_{0}_cycle_count'.format(b.id): {
+ 'options': [None, 'Cycle Count', 'cycle count', 'battery', 'megacli.bbu_cycle_count', 'line'],
+ 'lines': [
+ ['bbu_{0}_cycle_count'.format(b.id), 'adapter {0}'.format(b.id)],
+ ]
+ }
+ }
+ )
+
+ return order, charts
+
+
+RE_ADAPTER = re.compile(
+ r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z ]+)'
+)
+
+RE_VD = re.compile(
+ r'Slot Number: ([0-9]+) Media Error Count: ([0-9]+) Predictive Failure Count: ([0-9]+)'
+)
+
+RE_BATTERY = re.compile(
+ r'BBU Capacity Info for Adapter: ([0-9]+) Relative State of Charge: ([0-9]+) % Cycle Count: ([0-9]+)'
+)
+
+
+def find_adapters(d):
+ keys = ('Adapter #', 'State')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [Adapter(*v) for v in RE_ADAPTER.findall(d)]
+
+
+def find_pds(d):
+ keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
+ d = ' '.join(v.strip() for v in d if v.startswith(keys))
+ return [PD(*v) for v in RE_VD.findall(d)]
+
+
+def find_batteries(d):
+ keys = ('BBU Capacity Info for Adapter', 'Relative State of Charge', 'Cycle Count')
+ d = ' '.join(v.strip() for v in d if v.strip().startswith(keys))
+ return [Battery(*v) for v in RE_BATTERY.findall(d)]
+
+
+class Adapter:
+ def __init__(self, n, state):
+ self.id = n
+ # TODO: Rewrite all of this
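+        # state is 1 when the adapter reports (Partially) Degraded or Failed, 0 otherwise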
+ self.state = int(state in ("Partially Degraded", "Degraded", "Failed"))
+
+ def data(self):
+ return {
+ 'adapter_{0}_degraded'.format(self.id): self.state,
+ }
+
+
+class PD:
+ def __init__(self, n, media_err, predict_fail):
+ self.id = n
+ self.media_err = media_err
+ self.predict_fail = predict_fail
+
+ def data(self):
+ return {
+ 'slot_{0}_media_error'.format(self.id): self.media_err,
+ 'slot_{0}_predictive_failure'.format(self.id): self.predict_fail,
+ }
+
+
+class Battery:
+ def __init__(self, adapt_id, rel_charge, cycle_count):
+ self.id = adapt_id
+ self.rel_charge = rel_charge
+ self.cycle_count = cycle_count
+
+ def data(self):
+ return {
+ 'bbu_{0}_relative_charge'.format(self.id): self.rel_charge,
+ 'bbu_{0}_cycle_count'.format(self.id): self.cycle_count,
+ }
+
+
+# TODO: hardcoded sudo...
+class Megacli:
+ def __init__(self):
+ self.s = find_binary('sudo')
+ self.m = find_binary('megacli') or find_binary('MegaCli') # Binary on FreeBSD is MegaCli
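+        # 'sudo -n -l' lists allowed commands non-interactively; check_sudo() uses it to verify passwordless sudo works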
+ self.sudo_check = [self.s, '-n', '-l']
+ self.disk_info = [self.s, '-n', self.m, '-LDPDInfo', '-aAll', '-NoLog']
+ self.battery_info = [self.s, '-n', self.m, '-AdpBbuCmd', '-a0', '-NoLog']
+
+ def __bool__(self):
+ return bool(self.s and self.m)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.do_battery = self.configuration.get('do_battery')
+ self.megacli = Megacli()
+
+ def check_sudo(self):
+ err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)
+ if err:
+ self.error(''.join(err))
+ return False
+ return True
+
+ def check_disk_info(self):
+ d = self._get_raw_data(command=self.megacli.disk_info)
+ if not d:
+ return False
+
+ ads = find_adapters(d)
+ pds = find_pds(d)
+
+ if not (ads and pds):
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.disk_info)))
+ return False
+
+ o, c = adapter_charts(ads)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ o, c = pd_charts(pds)
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def check_battery(self):
+ d = self._get_raw_data(command=self.megacli.battery_info)
+ if not d:
+ return False
+
+ bats = find_batteries(d)
+
+ if not bats:
+ self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.battery_info)))
+ return False
+
+ o, c = battery_charts(bats)
+ self.order.extend(o)
+ self.definitions.update(c)
+ return True
+
+ def check(self):
+ if not self.megacli:
+ self.error('can\'t locate "sudo" or "megacli" binary')
+ return None
+
+ if not (self.check_sudo() and self.check_disk_info()):
+ return False
+
+ if self.do_battery:
+ self.do_battery = self.check_battery()
+
+ return True
+
+ def get_data(self):
+ data = dict()
+
+ data.update(self.get_adapter_pd_data())
+
+ if self.do_battery:
+ data.update(self.get_battery_data())
+
+ return data or None
+
+ def get_adapter_pd_data(self):
+ raw = self._get_raw_data(command=self.megacli.disk_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for a in find_adapters(raw):
+ data.update(a.data())
+
+ for p in find_pds(raw):
+ data.update(p.data())
+
+ return data
+
+ def get_battery_data(self):
+ raw = self._get_raw_data(command=self.megacli.battery_info)
+ data = dict()
+
+ if not raw:
+ return data
+
+ for b in find_batteries(raw):
+ data.update(b.data())
+
+ return data
diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf
new file mode 100644
index 00000000..1af4292d
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/megacli.conf
@@ -0,0 +1,60 @@
+# netdata python.d.plugin configuration for megacli
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, megacli also supports the following:
+#
+# do_battery: yes/no # default is no. Battery stats (adds additional call to megacli `megacli -AdpBbuCmd -a0`).
+#
+# ----------------------------------------------------------------------
+# uncomment the line below to collect battery statistics
+# do_battery: yes
diff --git a/collectors/python.d.plugin/megacli/metadata.yaml b/collectors/python.d.plugin/megacli/metadata.yaml
new file mode 100644
index 00000000..4a2ba43e
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/metadata.yaml
@@ -0,0 +1,193 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: megacli
+ monitored_instance:
+ name: MegaCLI
+ link: "https://wikitech.wikimedia.org/wiki/MegaCli"
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: "hard-drive.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - storage
+ - raid-controller
+ - manage-disks
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Examine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics."
+ method_description: |
+          Collects adapter, physical drive, and battery stats using the `megacli` command-line tool.
+
+ Executed commands:
+
+ - `sudo -n megacli -LDPDInfo -aAll`
+ - `sudo -n megacli -AdpBbuCmd -a0`
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: "The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password."
+ default_behavior:
+ auto_detection:
+ description: "After all the permissions are satisfied, netdata should be to execute commands via the megacli command line utility"
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Grant permissions for netdata, to run megacli as sudoer
+ description: |
+ The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.
+
+            Add the following to your `/etc/sudoers` file (`which megacli` shows the full path to the binary):
+
+ ```bash
+ netdata ALL=(root) NOPASSWD: /path/to/megacli
+ ```
+ - title: "Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)"
+ description: |
+            The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute megacli using sudo.
+
+ As root user, do the following:
+
+ ```bash
+ mkdir /etc/systemd/system/netdata.service.d
+ echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+ systemctl daemon-reload
+ systemctl restart netdata.service
+ ```
+ configuration:
+ file:
+ name: "python.d/megacli.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: do_battery
+                description: Whether to also collect battery backup unit (BBU) statistics (adds an additional `megacli -AdpBbuCmd -a0` call).
+ default_value: no
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration per job
+ config: |
+ job_name:
+ name: myname
+ update_every: 1
+ priority: 60000
+ penalty: yes
+ autodetection_retry: 0
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: megacli_adapter_state
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.adapter_degraded
+ info: "adapter is in the degraded state (0: false, 1: true)"
+ - name: megacli_pd_media_errors
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.pd_media_error
+ info: number of physical drive media errors
+ - name: megacli_pd_predictive_failures
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.pd_predictive_failure
+ info: number of physical drive predictive failures
+ - name: megacli_bbu_relative_charge
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.bbu_relative_charge
+ info: average battery backup unit (BBU) relative state of charge over the last 10 seconds
+ - name: megacli_bbu_cycle_count
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.bbu_cycle_count
+ info: average battery backup unit (BBU) charge cycles count over the last 10 seconds
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: megacli.adapter_degraded
+ description: Adapter State
+ unit: "is degraded"
+ chart_type: line
+ dimensions:
+ - name: a dimension per adapter
+ - name: megacli.pd_media_error
+ description: Physical Drives Media Errors
+ unit: "errors/s"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical drive
+ - name: megacli.pd_predictive_failure
+ description: Physical Drives Predictive Failures
+ unit: "failures/s"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical drive
+ - name: battery
+ description: "Metrics related to Battery Backup Units, each BBU provides its own set of the following metrics."
+ labels: []
+ metrics:
+ - name: megacli.bbu_relative_charge
+ description: Relative State of Charge
+ unit: "percentage"
+ chart_type: line
+ dimensions:
+ - name: adapter {battery id}
+ - name: megacli.bbu_cycle_count
+ description: Cycle Count
+ unit: "cycle count"
+ chart_type: line
+ dimensions:
+ - name: adapter {battery id}
diff --git a/collectors/python.d.plugin/memcached/Makefile.inc b/collectors/python.d.plugin/memcached/Makefile.inc
new file mode 100644
index 00000000..e6035716
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += memcached/memcached.chart.py
+dist_pythonconfig_DATA += memcached/memcached.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += memcached/README.md memcached/Makefile.inc
+
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
new file mode 120000
index 00000000..2cb76d33
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -0,0 +1 @@
+integrations/memcached.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/memcached/integrations/memcached.md b/collectors/python.d.plugin/memcached/integrations/memcached.md
new file mode 100644
index 00000000..113b86c8
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/integrations/memcached.md
@@ -0,0 +1,215 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/memcached/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/memcached/metadata.yaml"
+sidebar_label: "Memcached"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Memcached
+
+
+<img src="https://netdata.cloud/img/memcached.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: memcached
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching.
+
+It reads the server's response to the `stats` command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).
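+
+For illustration, a minimal standalone sketch of that exchange (plain sockets, no Netdata framework): it sends `stats\r\n`, reads until the terminating `END` line, and turns the `STAT key value` lines into a dict. The host and port are the collector defaults; everything else is hypothetical.
+
+```python
+import socket
+
+
+def memcached_stats(host='127.0.0.1', port=11211, timeout=2.0):
+    """Send the `stats` command and return the STAT lines as a dict."""
+    with socket.create_connection((host, port), timeout=timeout) as sock:
+        sock.sendall(b'stats\r\n')
+        raw = b''
+        while not raw.endswith(b'END\r\n'):
+            chunk = sock.recv(4096)
+            if not chunk:
+                break
+            raw += chunk
+    stats = {}
+    for line in raw.decode().splitlines():
+        if line.startswith('STAT '):
+            _, key, value = line.split(' ', 2)
+            stats[key] = value
+    return stats
+
+
+if __name__ == '__main__':
+    s = memcached_stats()
+    print('used bytes:', s.get('bytes'), 'of', s.get('limit_maxbytes'))
+```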
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is given, the collector will attempt to connect to a memcached instance at `127.0.0.1:11211`.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Memcached instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| memcached.cache | available, used | MiB |
+| memcached.net | in, out | kilobits/s |
+| memcached.connections | current, rejected, total | connections/s |
+| memcached.items | current, total | items |
+| memcached.evicted_reclaimed | reclaimed, evicted | items |
+| memcached.get | hits, misses | requests |
+| memcached.get_rate | rate | requests/s |
+| memcached.set_rate | rate | requests/s |
+| memcached.delete | hits, misses | requests |
+| memcached.cas | hits, misses, bad value | requests |
+| memcached.increment | hits, misses | requests |
+| memcached.decrement | hits, misses | requests |
+| memcached.touch | hits, misses | requests |
+| memcached.touch_rate | rate | requests/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |
+| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |
+| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/memcached.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/memcached.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| host | the host to connect to. | 127.0.0.1 | no |
+| port | the port to connect to. | 11211 | no |
+| update_every | Sets the default data collection frequency. | 10 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+
+##### localhost
+
+An example configuration for localhost.
+
+```yaml
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 11211
+
+```
+##### localipv4
+
+An example configuration for localipv4.
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 11211
+
+```
+</details>
+
+##### localipv6
+
+An example configuration for localipv6.
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local'
+ host: '::1'
+ port: 11211
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `memcached` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin memcached debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
new file mode 100644
index 00000000..adb9560b
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+# Description: memcached netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+ORDER = [
+ 'cache',
+ 'net',
+ 'connections',
+ 'items',
+ 'evicted_reclaimed',
+ 'get',
+ 'get_rate',
+ 'set_rate',
+ 'cas',
+ 'delete',
+ 'increment',
+ 'decrement',
+ 'touch',
+ 'touch_rate',
+]
+
+CHARTS = {
+ 'cache': {
+ 'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'],
+ 'lines': [
+ ['avail', 'available', 'absolute', 1, 1 << 20],
+ ['used', 'used', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'net': {
+ 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
+ 'lines': [
+ ['bytes_read', 'in', 'incremental', 8, 1000],
+ ['bytes_written', 'out', 'incremental', -8, 1000],
+ ]
+ },
+ 'connections': {
+ 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
+ 'lines': [
+ ['curr_connections', 'current', 'incremental'],
+ ['rejected_connections', 'rejected', 'incremental'],
+ ['total_connections', 'total', 'incremental']
+ ]
+ },
+ 'items': {
+ 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
+ 'lines': [
+ ['curr_items', 'current', 'absolute'],
+ ['total_items', 'total', 'absolute']
+ ]
+ },
+ 'evicted_reclaimed': {
+ 'options': [None, 'Evicted and Reclaimed Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
+ 'lines': [
+ ['reclaimed', 'reclaimed', 'absolute'],
+ ['evictions', 'evicted', 'absolute']
+ ]
+ },
+ 'get': {
+ 'options': [None, 'Get Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
+ 'lines': [
+ ['get_hits', 'hits', 'percent-of-absolute-row'],
+ ['get_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'get_rate': {
+ 'options': [None, 'Get Request Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
+ 'lines': [
+ ['cmd_get', 'rate', 'incremental']
+ ]
+ },
+ 'set_rate': {
+ 'options': [None, 'Set Request Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
+ 'lines': [
+ ['cmd_set', 'rate', 'incremental']
+ ]
+ },
+ 'delete': {
+ 'options': [None, 'Delete Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
+ 'lines': [
+ ['delete_hits', 'hits', 'percent-of-absolute-row'],
+ ['delete_misses', 'misses', 'percent-of-absolute-row'],
+ ]
+ },
+ 'cas': {
+ 'options': [None, 'Check and Set Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
+ 'lines': [
+ ['cas_hits', 'hits', 'percent-of-absolute-row'],
+ ['cas_misses', 'misses', 'percent-of-absolute-row'],
+ ['cas_badval', 'bad value', 'percent-of-absolute-row']
+ ]
+ },
+ 'increment': {
+ 'options': [None, 'Increment Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
+ 'lines': [
+ ['incr_hits', 'hits', 'percent-of-absolute-row'],
+ ['incr_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'decrement': {
+ 'options': [None, 'Decrement Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
+ 'lines': [
+ ['decr_hits', 'hits', 'percent-of-absolute-row'],
+ ['decr_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'touch': {
+ 'options': [None, 'Touch Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
+ 'lines': [
+ ['touch_hits', 'hits', 'percent-of-absolute-row'],
+ ['touch_misses', 'misses', 'percent-of-absolute-row']
+ ]
+ },
+ 'touch_rate': {
+ 'options': [None, 'Touch Request Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
+ 'lines': [
+ ['cmd_touch', 'rate', 'incremental']
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.request = 'stats\r\n'
+ self.host = 'localhost'
+ self.port = 11211
+ self._keep_alive = True
+ self.unix_socket = None
+
+ def _get_data(self):
+ """
+ Get data from socket
+ :return: dict
+ """
+ response = self._get_raw_data()
+ if response is None:
+ # error has already been logged
+ return None
+
+ if response.startswith('ERROR'):
+ self.error('received ERROR')
+ return None
+
+ try:
+ parsed = response.split('\n')
+ except AttributeError:
+ self.error('response is invalid/empty')
+ return None
+
+ # split the response
+ data = {}
+ for line in parsed:
+ if line.startswith('STAT'):
+ try:
+ t = line[5:].split(' ')
+ data[t[0]] = t[1]
+ except (IndexError, ValueError):
+ self.debug('invalid line received: ' + str(line))
+
+ if not data:
+ self.error("received data doesn't have any records")
+ return None
+
+ # custom calculations
+ try:
+ data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])
+ data['used'] = int(data['bytes'])
+ except (KeyError, ValueError, TypeError):
+ pass
+
+ return data
+
+ def _check_raw_data(self, data):
+ if data.endswith('END\r\n'):
+ self.debug('received full response from memcached')
+ return True
+
+ self.debug('waiting more data from memcached')
+ return False
+
+ def check(self):
+ """
+ Parse configuration, check if memcached is available
+ :return: boolean
+ """
+ self._parse_config()
+ data = self._get_data()
+ if data is None:
+ return False
+ return True
diff --git a/collectors/python.d.plugin/memcached/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf
new file mode 100644
index 00000000..3286b462
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/memcached.conf
@@ -0,0 +1,90 @@
+# netdata python.d.plugin configuration for memcached
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, memcached also supports the following:
+#
+# socket: 'path/to/memcached.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 11211
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 11211
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 11211
+
diff --git a/collectors/python.d.plugin/memcached/metadata.yaml b/collectors/python.d.plugin/memcached/metadata.yaml
new file mode 100644
index 00000000..38c9f685
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/metadata.yaml
@@ -0,0 +1,247 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: memcached
+ monitored_instance:
+ name: Memcached
+ link: https://memcached.org/
+ categories:
+ - data-collection.database-servers
+ icon_filename: "memcached.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - memcached
+ - memcache
+ - cache
+ - database
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching."
+ method_description: "It reads server response to stats command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats))."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+            If no configuration is given, the collector will attempt to connect to a memcached instance at `127.0.0.1:11211`.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: python.d/memcached.conf
+ description: ""
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: host
+ description: the host to connect to.
+ default_value: "127.0.0.1"
+ required: false
+ - name: port
+ description: the port to connect to.
+ default_value: "11211"
+ required: false
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 10
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: localhost
+ description: An example configuration for localhost.
+ folding:
+ enabled: false
+ config: |
+ localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 11211
+ - name: localipv4
+ description: An example configuration for localipv4.
+ folding:
+ enabled: true
+ config: |
+            localipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 11211
+ - name: localipv6
+ description: An example configuration for localipv6.
+ folding:
+ enabled: true
+ config: |
+            localipv6:
+ name: 'local'
+ host: '::1'
+ port: 11211
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: memcached_cache_memory_usage
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf
+ metric: memcached.cache
+ info: cache memory utilization
+ - name: memcached_cache_fill_rate
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf
+ metric: memcached.cache
+ info: average rate the cache fills up (positive), or frees up (negative) space over the last hour
+ - name: memcached_out_of_cache_space_time
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf
+ metric: memcached.cache
+ info: estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: memcached.cache
+ description: Cache Size
+ unit: "MiB"
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: memcached.net
+ description: Network
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: memcached.connections
+ description: Connections
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: current
+ - name: rejected
+ - name: total
+ - name: memcached.items
+ description: Items
+ unit: "items"
+ chart_type: line
+ dimensions:
+ - name: current
+ - name: total
+ - name: memcached.evicted_reclaimed
+ description: Evicted and Reclaimed Items
+ unit: "items"
+ chart_type: line
+ dimensions:
+ - name: reclaimed
+ - name: evicted
+ - name: memcached.get
+ description: Get Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+            - name: hits
+ - name: misses
+ - name: memcached.get_rate
+ description: Get Request Rate
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: rate
+ - name: memcached.set_rate
+ description: Set Request Rate
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: rate
+ - name: memcached.delete
+ description: Delete Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: memcached.cas
+ description: Check and Set Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: bad value
+ - name: memcached.increment
+ description: Increment Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: memcached.decrement
+ description: Decrement Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: memcached.touch
+ description: Touch Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: memcached.touch_rate
+ description: Touch Request Rate
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: rate
diff --git a/collectors/python.d.plugin/monit/Makefile.inc b/collectors/python.d.plugin/monit/Makefile.inc
new file mode 100644
index 00000000..4a3673fd
--- /dev/null
+++ b/collectors/python.d.plugin/monit/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += monit/monit.chart.py
+dist_pythonconfig_DATA += monit/monit.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += monit/README.md monit/Makefile.inc
+
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
new file mode 120000
index 00000000..ac69496f
--- /dev/null
+++ b/collectors/python.d.plugin/monit/README.md
@@ -0,0 +1 @@
+integrations/monit.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/monit/integrations/monit.md b/collectors/python.d.plugin/monit/integrations/monit.md
new file mode 100644
index 00000000..18219141
--- /dev/null
+++ b/collectors/python.d.plugin/monit/integrations/monit.md
@@ -0,0 +1,214 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/monit/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/monit/metadata.yaml"
+sidebar_label: "Monit"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Monit
+
+
+<img src="https://netdata.cloud/img/monit.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: monit
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.
+
+
+It gathers data from Monit's XML interface.
+
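+For illustration only, the same XML interface can be queried with nothing but the Python standard library. The sketch below assumes the default `http://localhost:2812` endpoint with no authentication, and uses the same `_status?format=xml&level=full` query as the collector:
+
+```python
+# Minimal sketch: fetch Monit's XML status page and report which services are
+# running (Monit reports status '0' for "no error" and monitor '1' for
+# "actively monitored").
+import xml.etree.ElementTree as ET
+from urllib.request import urlopen
+
+raw = urlopen('http://localhost:2812/_status?format=xml&level=full').read()
+root = ET.fromstring(raw)
+
+for svc in root.findall('./service'):
+    name = svc.find('name').text
+    running = svc.find('status').text == '0' and svc.find('monitor').text == '1'
+    print('{0}: {1}'.format(name, 'running' if running else 'not running'))
+```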
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, this collector will attempt to connect to Monit at `http://localhost:2812`
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Monit instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| monit.filesystems | a dimension per target | filesystems |
+| monit.directories | a dimension per target | directories |
+| monit.files | a dimension per target | files |
+| monit.fifos | a dimension per target | pipes |
+| monit.programs | a dimension per target | programs |
+| monit.services | a dimension per target | processes |
+| monit.process_uptime | a dimension per target | seconds |
+| monit.process_threads | a dimension per target | threads |
+| monit.process_childrens | a dimension per target | children |
+| monit.hosts | a dimension per target | hosts |
+| monit.host_latency | a dimension per target | milliseconds |
+| monit.networks | a dimension per target | interfaces |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/monit.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/monit.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
+| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes |
+| user | Username in case the URL is password protected. | | no |
+| pass | Password in case the URL is password protected. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic configuration example.
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://localhost:2812'
+
+```
+##### Basic Authentication
+
+Example using basic username and password in order to authenticate.
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://localhost:2812'
+ user: 'foo'
+ pass: 'bar'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local'
+ url: 'http://localhost:2812'
+
+remote_job:
+ name: 'remote'
+ url: 'http://192.0.2.1:2812'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `monit` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin monit debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/monit/metadata.yaml b/collectors/python.d.plugin/monit/metadata.yaml
new file mode 100644
index 00000000..b5127318
--- /dev/null
+++ b/collectors/python.d.plugin/monit/metadata.yaml
@@ -0,0 +1,217 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: monit
+ monitored_instance:
+ name: Monit
+ link: https://mmonit.com/monit/
+ categories:
+ - data-collection.synthetic-checks
+ icon_filename: "monit.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - monit
+ - mmonit
+ - supervision tool
+ - monitrc
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.
+ method_description: |
+ It gathers data from Monit's XML interface.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: By default, this collector will attempt to connect to Monit at `http://localhost:2812`
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "python.d/monit.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 1
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: >
+ Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: "local"
+ required: false
+ - name: url
+ description: The URL to fetch Monit's metrics.
+ default_value: http://localhost:2812
+ required: true
+ - name: user
+ description: Username in case the URL is password protected.
+ default_value: ""
+ required: false
+ - name: pass
+ description: Password in case the URL is password protected.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic configuration example.
+ folding:
+ enabled: false
+ config: |
+ localhost:
+ name : 'local'
+ url : 'http://localhost:2812'
+ - name: Basic Authentication
+ description: Example using basic username and password in order to authenticate.
+ config: |
+ localhost:
+ name : 'local'
+ url : 'http://localhost:2812'
+ user: 'foo'
+ pass: 'bar'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ localhost:
+ name: 'local'
+ url: 'http://localhost:2812'
+
+ remote_job:
+ name: 'remote'
+ url: 'http://192.0.2.1:2812'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: monit.filesystems
+ description: Filesystems
+ unit: "filesystems"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.directories
+ description: Directories
+ unit: "directories"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.files
+ description: Files
+ unit: "files"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.fifos
+ description: Pipes (fifo)
+ unit: "pipes"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.programs
+ description: Programs statuses
+ unit: "programs"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.services
+ description: Processes statuses
+ unit: "processes"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.process_uptime
+ description: Processes uptime
+ unit: "seconds"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.process_threads
+ description: Processes threads
+ unit: "threads"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.process_childrens
+ description: Child processes
+ unit: "children"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.hosts
+ description: Hosts
+ unit: "hosts"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.host_latency
+ description: Hosts latency
+ unit: "milliseconds"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
+ - name: monit.networks
+ description: Network interfaces and addresses
+ unit: "interfaces"
+ chart_type: line
+ dimensions:
+ - name: a dimension per target
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
new file mode 100644
index 00000000..5d926961
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.chart.py
@@ -0,0 +1,360 @@
+# -*- coding: utf-8 -*-
+# Description: monit netdata python.d module
+# Author: Evgeniy K. (n0guest)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import xml.etree.ElementTree as ET
+from collections import namedtuple
+
+from bases.FrameworkServices.UrlService import UrlService
+
+MonitType = namedtuple('MonitType', ('index', 'name'))
+
+# see enum Service_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
+# typedef enum {
+# Service_Filesystem = 0,
+# Service_Directory,
+# Service_File,
+# Service_Process,
+# Service_Host,
+# Service_System,
+# Service_Fifo,
+# Service_Program,
+# Service_Net,
+# Service_Last = Service_Net
+# } __attribute__((__packed__)) Service_Type;
+
+TYPE_FILESYSTEM = MonitType(0, 'filesystem')
+TYPE_DIRECTORY = MonitType(1, 'directory')
+TYPE_FILE = MonitType(2, 'file')
+TYPE_PROCESS = MonitType(3, 'process')
+TYPE_HOST = MonitType(4, 'host')
+TYPE_SYSTEM = MonitType(5, 'system')
+TYPE_FIFO = MonitType(6, 'fifo')
+TYPE_PROGRAM = MonitType(7, 'program')
+TYPE_NET = MonitType(8, 'net')
+
+TYPES = (
+ TYPE_FILESYSTEM,
+ TYPE_DIRECTORY,
+ TYPE_FILE,
+ TYPE_PROCESS,
+ TYPE_HOST,
+ TYPE_SYSTEM,
+ TYPE_FIFO,
+ TYPE_PROGRAM,
+ TYPE_NET,
+)
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ 'filesystem',
+ 'directory',
+ 'file',
+ 'process',
+ 'process_uptime',
+ 'process_threads',
+ 'process_children',
+ 'host',
+ 'host_latency',
+ 'system',
+ 'fifo',
+ 'program',
+ 'net'
+]
+
+CHARTS = {
+ 'filesystem': {
+ 'options': ['filesystems', 'Filesystems', 'filesystems', 'filesystem', 'monit.filesystems', 'line'],
+ 'lines': []
+ },
+ 'directory': {
+ 'options': ['directories', 'Directories', 'directories', 'filesystem', 'monit.directories', 'line'],
+ 'lines': []
+ },
+ 'file': {
+ 'options': ['files', 'Files', 'files', 'filesystem', 'monit.files', 'line'],
+ 'lines': []
+ },
+ 'fifo': {
+ 'options': ['fifos', 'Pipes (fifo)', 'pipes', 'filesystem', 'monit.fifos', 'line'],
+ 'lines': []
+ },
+ 'program': {
+ 'options': ['programs', 'Programs statuses', 'programs', 'applications', 'monit.programs', 'line'],
+ 'lines': []
+ },
+ 'process': {
+ 'options': ['processes', 'Processes statuses', 'processes', 'applications', 'monit.services', 'line'],
+ 'lines': []
+ },
+ 'process_uptime': {
+ 'options': ['processes uptime', 'Processes uptime', 'seconds', 'applications',
+ 'monit.process_uptime', 'line', 'hidden'],
+ 'lines': []
+ },
+ 'process_threads': {
+ 'options': ['processes threads', 'Processes threads', 'threads', 'applications',
+ 'monit.process_threads', 'line'],
+ 'lines': []
+ },
+ 'process_children': {
+ 'options': ['processes childrens', 'Child processes', 'children', 'applications',
+ 'monit.process_childrens', 'line'],
+ 'lines': []
+ },
+ 'host': {
+ 'options': ['hosts', 'Hosts', 'hosts', 'network', 'monit.hosts', 'line'],
+ 'lines': []
+ },
+ 'host_latency': {
+ 'options': ['hosts latency', 'Hosts latency', 'milliseconds', 'network', 'monit.host_latency', 'line'],
+ 'lines': []
+ },
+ 'net': {
+ 'options': ['interfaces', 'Network interfaces and addresses', 'interfaces', 'network',
+ 'monit.networks', 'line'],
+ 'lines': []
+ },
+}
+
+
+class BaseMonitService(object):
+ def __init__(self, typ, name, status, monitor):
+ self.type = typ
+ self.name = name
+ self.status = status
+ self.monitor = monitor
+
+ def __repr__(self):
+ return 'MonitService({0}:{1})'.format(self.type.name, self.name)
+
+ def __eq__(self, other):
+ if not isinstance(other, BaseMonitService):
+ return False
+ return self.type == other.type and self.name == other.name
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def is_running(self):
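+        # Monit reports status '0' when the service has no errors and monitor '1'
+        # when it is actively monitored; anything else counts as not running.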
+ return self.status == '0' and self.monitor == '1'
+
+ def key(self):
+ return '{0}_{1}'.format(self.type.name, self.name)
+
+ def data(self):
+ return {self.key(): int(self.is_running())}
+
+
+class ProcessMonitService(BaseMonitService):
+ def __init__(self, typ, name, status, monitor):
+ super(ProcessMonitService, self).__init__(typ, name, status, monitor)
+ self.uptime = None
+ self.threads = None
+ self.children = None
+
+ def __eq__(self, other):
+ return super(ProcessMonitService, self).__eq__(other)
+
+ def __ne__(self, other):
+ return super(ProcessMonitService, self).__ne__(other)
+
+ def __hash__(self):
+ return super(ProcessMonitService, self).__hash__()
+
+ def uptime_key(self):
+ return 'process_uptime_{0}'.format(self.name)
+
+ def threads_key(self):
+ return 'process_threads_{0}'.format(self.name)
+
+ def children_key(self):
+ return 'process_children_{0}'.format(self.name)
+
+ def data(self):
+ base_data = super(ProcessMonitService, self).data()
+ # skipping bugged metrics with negative uptime (monit before v5.16)
+ uptime = self.uptime if self.uptime and int(self.uptime) >= 0 else None
+ data = {
+ self.uptime_key(): uptime,
+ self.threads_key(): self.threads,
+ self.children_key(): self.children,
+ }
+ data.update(base_data)
+
+ return data
+
+
+class HostMonitService(BaseMonitService):
+ def __init__(self, typ, name, status, monitor):
+ super(HostMonitService, self).__init__(typ, name, status, monitor)
+ self.latency = None
+
+ def __eq__(self, other):
+ return super(HostMonitService, self).__eq__(other)
+
+ def __ne__(self, other):
+ return super(HostMonitService, self).__ne__(other)
+
+ def __hash__(self):
+ return super(HostMonitService, self).__hash__()
+
+ def latency_key(self):
+ return 'host_latency_{0}'.format(self.name)
+
+ def data(self):
+ base_data = super(HostMonitService, self).data()
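+        # Monit reports ICMP response time in seconds; store it as microseconds so
+        # the chart's 1000/1000000 multiplier/divisor renders milliseconds.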
+ latency = float(self.latency) * 1000000 if self.latency else None
+ data = {self.latency_key(): latency}
+ data.update(base_data)
+
+ return data
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ base_url = self.configuration.get('url', "http://localhost:2812")
+ self.url = '{0}/_status?format=xml&level=full'.format(base_url)
+ self.active_services = list()
+
+ def parse(self, raw):
+ try:
+ root = ET.fromstring(raw)
+ except ET.ParseError:
+ self.error("URL {0} didn't return a valid XML page. Please check your settings.".format(self.url))
+ return None
+ return root
+
+ def _get_data(self):
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ root = self.parse(raw)
+ if root is None:
+ return None
+
+ services = self.get_services(root)
+ if not services:
+ return None
+
+ if len(self.charts) > 0:
+ self.update_charts(services)
+
+ data = dict()
+
+ for svc in services:
+ data.update(svc.data())
+
+ return data
+
+ def get_services(self, root):
+ services = list()
+
+ for typ in TYPES:
+ if typ == TYPE_SYSTEM:
+ self.debug("skipping service from '{0}' category, it's useless in graphs".format(TYPE_SYSTEM.name))
+ continue
+
+ xpath_query = "./service[@type='{0}']".format(typ.index)
+ self.debug('Searching for {0} as {1}'.format(typ.name, xpath_query))
+
+ for svc_root in root.findall(xpath_query):
+ svc = create_service(svc_root, typ)
+ self.debug('=> found {0} with type={1}, status={2}, monitoring={3}'.format(
+ svc.name, svc.type.name, svc.status, svc.monitor))
+
+ services.append(svc)
+
+ return services
+
+ def update_charts(self, services):
+ remove = [svc for svc in self.active_services if svc not in services]
+ add = [svc for svc in services if svc not in self.active_services]
+
+ self.remove_services_from_charts(remove)
+ self.add_services_to_charts(add)
+
+ self.active_services = services
+
+ def add_services_to_charts(self, services):
+ for svc in services:
+ if svc.type == TYPE_HOST:
+ self.charts['host_latency'].add_dimension([svc.latency_key(), svc.name, 'absolute', 1000, 1000000])
+ if svc.type == TYPE_PROCESS:
+ self.charts['process_uptime'].add_dimension([svc.uptime_key(), svc.name])
+ self.charts['process_threads'].add_dimension([svc.threads_key(), svc.name])
+ self.charts['process_children'].add_dimension([svc.children_key(), svc.name])
+ self.charts[svc.type.name].add_dimension([svc.key(), svc.name])
+
+ def remove_services_from_charts(self, services):
+ for svc in services:
+ if svc.type == TYPE_HOST:
+ self.charts['host_latency'].del_dimension(svc.latency_key(), False)
+ if svc.type == TYPE_PROCESS:
+ self.charts['process_uptime'].del_dimension(svc.uptime_key(), False)
+ self.charts['process_threads'].del_dimension(svc.threads_key(), False)
+ self.charts['process_children'].del_dimension(svc.children_key(), False)
+ self.charts[svc.type.name].del_dimension(svc.key(), False)
+
+
+def create_service(root, typ):
+ if typ == TYPE_HOST:
+ return create_host_service(root)
+ elif typ == TYPE_PROCESS:
+ return create_process_service(root)
+ return create_base_service(root, typ)
+
+
+def create_host_service(root):
+ svc = HostMonitService(
+ TYPE_HOST,
+ root.find('name').text,
+ root.find('status').text,
+ root.find('monitor').text,
+ )
+
+ latency = root.find('./icmp/responsetime')
+ if latency is not None:
+ svc.latency = latency.text
+
+ return svc
+
+
+def create_process_service(root):
+ svc = ProcessMonitService(
+ TYPE_PROCESS,
+ root.find('name').text,
+ root.find('status').text,
+ root.find('monitor').text,
+ )
+
+ uptime = root.find('uptime')
+ if uptime is not None:
+ svc.uptime = uptime.text
+
+ threads = root.find('threads')
+ if threads is not None:
+ svc.threads = threads.text
+
+ children = root.find('children')
+ if children is not None:
+ svc.children = children.text
+
+ return svc
+
+
+def create_base_service(root, typ):
+ return BaseMonitService(
+ typ,
+ root.find('name').text,
+ root.find('status').text,
+ root.find('monitor').text,
+ )
diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf
new file mode 100644
index 00000000..9a3fb693
--- /dev/null
+++ b/collectors/python.d.plugin/monit/monit.conf
@@ -0,0 +1,86 @@
+# netdata python.d.plugin configuration for monit
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, this plugin also supports the following:
+#
+# url: 'URL' # the URL to fetch monit's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# Example
+#
+# local:
+# name : 'Local Monit'
+# url : 'http://localhost:2812'
+#
+# "local" will show up in Netdata logs. "Local Monit" will show up in the menu
+# in the monit section.
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:2812'
diff --git a/collectors/python.d.plugin/nsd/Makefile.inc b/collectors/python.d.plugin/nsd/Makefile.inc
new file mode 100644
index 00000000..58e9fd67
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nsd/nsd.chart.py
+dist_pythonconfig_DATA += nsd/nsd.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nsd/README.md nsd/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
new file mode 120000
index 00000000..59fcfe49
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -0,0 +1 @@
+integrations/name_server_daemon.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md b/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md
new file mode 100644
index 00000000..0e66c44e
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md
@@ -0,0 +1,199 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nsd/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nsd/metadata.yaml"
+sidebar_label: "Name Server Daemon"
+learn_status: "Published"
+learn_rel_path: "Data Collection/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Name Server Daemon
+
+
+<img src="https://netdata.cloud/img/nsd.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: nsd
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors NSD statistics like queries, zones, protocols, query types and more.
+
+
+It uses the `nsd-control stats_noreset` command to gather metrics.
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Name Server Daemon instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nsd.queries | queries | queries/s |
+| nsd.zones | master, slave | zones |
+| nsd.protocols | udp, udp6, tcp, tcp6 | queries/s |
+| nsd.type | A, NS, CNAME, SOA, PTR, HINFO, MX, NAPTR, TXT, AAAA, SRV, ANY | queries/s |
+| nsd.transfer | NOTIFY, AXFR | queries/s |
+| nsd.rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN | queries/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### NSD version
+
+The version of `nsd` must be 4.0+.
+
+
+#### Provide Netdata the permissions to run the command
+
+Netdata must have permissions to run the `nsd-control stats_noreset` command.
+
+You can:
+
+- Add "netdata" user to "nsd" group:
+ ```
+ usermod -aG nsd netdata
+ ```
+- Add Netdata to sudoers
+ 1. Edit the sudoers file:
+ ```
+ visudo -f /etc/sudoers.d/netdata
+ ```
+ 2. Add the entry:
+ ```
+ Defaults:netdata !requiretty
+ netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
+ ```
+
+ > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/nsd.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/nsd.conf
+```
+#### Options
+
+This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 30 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| command | The command to run | nsd-control stats_noreset | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic configuration example.
+
+```yaml
+local:
+ name: 'nsd_local'
+ command: 'nsd-control stats_noreset'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `nsd` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin nsd debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/nsd/metadata.yaml b/collectors/python.d.plugin/nsd/metadata.yaml
new file mode 100644
index 00000000..f5e2c46b
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/metadata.yaml
@@ -0,0 +1,201 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: nsd
+ monitored_instance:
+ name: Name Server Daemon
+ link: https://nsd.docs.nlnetlabs.nl/en/latest/#
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ icon_filename: "nsd.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - nsd
+ - name server daemon
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors NSD statistics like queries, zones, protocols, query types and more.
+ method_description: |
+ It uses the `nsd-control stats_noreset` command to gather metrics.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: If permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: NSD version
+ description: |
+ The version of `nsd` must be 4.0+.
+ - title: Provide Netdata the permissions to run the command
+ description: |
+ Netdata must have permissions to run the `nsd-control stats_noreset` command.
+
+ You can:
+
+ - Add "netdata" user to "nsd" group:
+ ```
+ usermod -aG nsd netdata
+ ```
+ - Add Netdata to sudoers
+ 1. Edit the sudoers file:
+ ```
+ visudo -f /etc/sudoers.d/netdata
+ ```
+ 2. Add the entry:
+ ```
+ Defaults:netdata !requiretty
+ netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
+ ```
+
+ > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.
+
+ configuration:
+ file:
+ name: "python.d/nsd.conf"
+ options:
+ description: |
+        This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
+
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 30
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: >
+ Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed
+ running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: command
+ description: The command to run
+ default_value: "nsd-control stats_noreset"
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic configuration example.
+ folding:
+ enabled: false
+ config: |
+ local:
+ name: 'nsd_local'
+ command: 'nsd-control stats_noreset'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: nsd.queries
+ description: queries
+ unit: "queries/s"
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: nsd.zones
+ description: zones
+ unit: "zones"
+ chart_type: stacked
+ dimensions:
+ - name: master
+ - name: slave
+ - name: nsd.protocols
+ description: protocol
+ unit: "queries/s"
+ chart_type: stacked
+ dimensions:
+ - name: udp
+ - name: udp6
+ - name: tcp
+ - name: tcp6
+ - name: nsd.type
+ description: query type
+ unit: "queries/s"
+ chart_type: stacked
+ dimensions:
+ - name: A
+ - name: NS
+ - name: CNAME
+ - name: SOA
+ - name: PTR
+ - name: HINFO
+ - name: MX
+ - name: NAPTR
+ - name: TXT
+ - name: AAAA
+ - name: SRV
+ - name: ANY
+ - name: nsd.transfer
+ description: transfer
+ unit: "queries/s"
+ chart_type: stacked
+ dimensions:
+ - name: NOTIFY
+ - name: AXFR
+ - name: nsd.rcode
+ description: return code
+ unit: "queries/s"
+ chart_type: stacked
+ dimensions:
+ - name: NOERROR
+ - name: FORMERR
+ - name: SERVFAIL
+ - name: NXDOMAIN
+ - name: NOTIMP
+ - name: REFUSED
+ - name: YXDOMAIN
diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
new file mode 100644
index 00000000..6f9b2cec
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Description: NSD `nsd-control stats_noreset` netdata python.d module
+# Author: <383c57 at gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+update_every = 30
+
+NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
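+# nsd-control prints one "key=value" counter per line (e.g. "num.queries=123");
+# dots in the key names are replaced with underscores in _get_data().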
+REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
+
+ORDER = [
+ 'queries',
+ 'zones',
+ 'protocol',
+ 'type',
+ 'transfer',
+ 'rcode',
+]
+
+CHARTS = {
+ 'queries': {
+ 'options': [None, 'queries', 'queries/s', 'queries', 'nsd.queries', 'line'],
+ 'lines': [
+ ['num_queries', 'queries', 'incremental']
+ ]
+ },
+ 'zones': {
+ 'options': [None, 'zones', 'zones', 'zones', 'nsd.zones', 'stacked'],
+ 'lines': [
+ ['zone_master', 'master', 'absolute'],
+ ['zone_slave', 'slave', 'absolute']
+ ]
+ },
+ 'protocol': {
+ 'options': [None, 'protocol', 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
+ 'lines': [
+ ['num_udp', 'udp', 'incremental'],
+ ['num_udp6', 'udp6', 'incremental'],
+ ['num_tcp', 'tcp', 'incremental'],
+ ['num_tcp6', 'tcp6', 'incremental']
+ ]
+ },
+ 'type': {
+ 'options': [None, 'query type', 'queries/s', 'query type', 'nsd.type', 'stacked'],
+ 'lines': [
+ ['num_type_A', 'A', 'incremental'],
+ ['num_type_NS', 'NS', 'incremental'],
+ ['num_type_CNAME', 'CNAME', 'incremental'],
+ ['num_type_SOA', 'SOA', 'incremental'],
+ ['num_type_PTR', 'PTR', 'incremental'],
+ ['num_type_HINFO', 'HINFO', 'incremental'],
+ ['num_type_MX', 'MX', 'incremental'],
+ ['num_type_NAPTR', 'NAPTR', 'incremental'],
+ ['num_type_TXT', 'TXT', 'incremental'],
+ ['num_type_AAAA', 'AAAA', 'incremental'],
+ ['num_type_SRV', 'SRV', 'incremental'],
+ ['num_type_TYPE255', 'ANY', 'incremental']
+ ]
+ },
+ 'transfer': {
+ 'options': [None, 'transfer', 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
+ 'lines': [
+ ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
+ ['num_type_TYPE252', 'AXFR', 'incremental']
+ ]
+ },
+ 'rcode': {
+ 'options': [None, 'return code', 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
+ 'lines': [
+ ['num_rcode_NOERROR', 'NOERROR', 'incremental'],
+ ['num_rcode_FORMERR', 'FORMERR', 'incremental'],
+ ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
+ ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
+ ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
+ ['num_rcode_REFUSED', 'REFUSED', 'incremental'],
+ ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = NSD_CONTROL_COMMAND
+
+ def _get_data(self):
+ lines = self._get_raw_data()
+ if not lines:
+ return None
+
+ stats = dict(
+ (k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines))
+ )
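+        # These counters may be missing from nsd-control's output; default them
+        # to 0 so their chart dimensions always exist.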
+ stats.setdefault('num_opcode_NOTIFY', 0)
+ stats.setdefault('num_type_TYPE252', 0)
+ stats.setdefault('num_type_TYPE255', 0)
+
+ return stats
diff --git a/collectors/python.d.plugin/nsd/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf
new file mode 100644
index 00000000..77a8a317
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/nsd.conf
@@ -0,0 +1,91 @@
+# netdata python.d.plugin configuration for nsd
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# nsd-control is slow, so once every 30 seconds
+# update_every: 30
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nsd also supports the following:
+#
+# command: 'nsd-control stats_noreset' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# IMPORTANT Information
+#
+# Netdata must have permissions to run `nsd-control stats_noreset` command
+#
+# - Example-1 (use "sudo")
+# 1. sudoers (e.g. visudo -f /etc/sudoers.d/netdata)
+# Defaults:netdata !requiretty
+# netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
+# 2. etc/netdata/python.d/nsd.conf
+# local:
+# update_every: 30
+# command: 'sudo /usr/sbin/nsd-control stats_noreset'
+#
+# - Example-2 (add "netdata" user to "nsd" group)
+# usermod -aG nsd netdata
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ update_every: 30
+ command: 'nsd-control stats_noreset'
diff --git a/collectors/python.d.plugin/nvidia_smi/Makefile.inc b/collectors/python.d.plugin/nvidia_smi/Makefile.inc
new file mode 100644
index 00000000..52fb25a6
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nvidia_smi/nvidia_smi.chart.py
+dist_pythonconfig_DATA += nvidia_smi/nvidia_smi.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nvidia_smi/README.md nvidia_smi/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
new file mode 100644
index 00000000..7d45289a
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/README.md
@@ -0,0 +1,157 @@
+<!--
+title: "Nvidia GPU monitoring with Netdata"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nvidia_smi/README.md"
+sidebar_label: "nvidia_smi-python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Integrations/Monitor/Devices"
+-->
+
+# Nvidia GPU collector
+
+Monitors performance metrics (memory usage, fan speed, PCIe bandwidth utilization, temperature, etc.) using the `nvidia-smi` CLI tool.
+
+## Requirements and Notes
+
+- You must have the `nvidia-smi` tool installed and your NVIDIA GPU(s) must support it. This mostly means the newer high-end models used for AI/ML and crypto, or the Pro range; read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
+- You must enable this plugin, as it is disabled by default due to minor performance issues:
+ ```bash
+ cd /etc/netdata # Replace this path with your Netdata config directory, if different
+ sudo ./edit-config python.d.conf
+ ```
+ Remove the '#' before nvidia_smi so it reads: `nvidia_smi: yes`.
+
+- On some systems, when the GPU is idle, the `nvidia-smi` tool unloads, which adds latency the next time it is queried. If you are running GPUs under constant workload this isn't likely to be an issue.
+- Currently the `nvidia-smi` tool is queried via the CLI. Updating the plugin to use the NVIDIA C/C++ API directly should resolve this issue. See the discussion here: <https://github.com/netdata/netdata/pull/4357>
+- Contributions are welcome.
+- Make sure `netdata` user can execute `/usr/bin/nvidia-smi` or wherever your binary is.
+- If the `nvidia-smi` process [is not killed after netdata restart](https://github.com/netdata/netdata/issues/7143), you need to turn off `loop_mode`.
+- `poll_seconds` is an integer that sets how often, in seconds, the tool is polled.
+
+## Charts
+
+It produces the following charts:
+
+- PCI Express Bandwidth Utilization in `KiB/s`
+- Fan Speed in `percentage`
+- GPU Utilization in `percentage`
+- Memory Bandwidth Utilization in `percentage`
+- Encoder/Decoder Utilization in `percentage`
+- Memory Usage in `MiB`
+- Temperature in `celsius`
+- Clock Frequencies in `MHz`
+- Power Utilization in `Watts`
+- Memory Used by Each Process in `MiB`
+- Memory Used by Each User in `MiB`
+- Number of Users on GPU in `num`
+
+## Configuration
+
+Edit the `python.d/nvidia_smi.conf` configuration file using `edit-config` from the Netdata [config
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nvidia_smi.conf
+```
+
+Sample:
+
+```yaml
+loop_mode : yes
+poll_seconds : 1
+exclude_zero_memory_users : yes
+```
+
+
+### Troubleshooting
+
+To troubleshoot issues with the `nvidia_smi` module, run the `python.d.plugin` with the debug option enabled. The
+output will show the data collection job's output or error messages explaining why the collector isn't working.
+
+First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's
+not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the
+plugin's directory, switch to the `netdata` user.
+
+```bash
+cd /usr/libexec/netdata/plugins.d/
+sudo su -s /bin/bash netdata
+```
+
+Now you can manually run the `nvidia_smi` module in debug mode:
+
+```bash
+./python.d.plugin nvidia_smi debug trace
+```
+
+## Docker
+
+GPU monitoring in a docker container is possible with [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) installed on the host system, and `gcompat` added to the `NETDATA_EXTRA_APK_PACKAGES` environment variable.
+
+Sample `docker-compose.yml`
+```yaml
+version: '3'
+services:
+ netdata:
+ image: netdata/netdata
+ container_name: netdata
+ hostname: example.com # set to fqdn of host
+ ports:
+ - 19999:19999
+ restart: unless-stopped
+ cap_add:
+ - SYS_PTRACE
+ security_opt:
+ - apparmor:unconfined
+ environment:
+ - NETDATA_EXTRA_APK_PACKAGES=gcompat
+ volumes:
+ - netdataconfig:/etc/netdata
+ - netdatalib:/var/lib/netdata
+ - netdatacache:/var/cache/netdata
+ - /etc/passwd:/host/etc/passwd:ro
+ - /etc/group:/host/etc/group:ro
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /etc/os-release:/host/etc/os-release:ro
+ deploy:
+ resources:
+ reservations:
+ devices:
+ - driver: nvidia
+ count: all
+ capabilities: [gpu]
+
+volumes:
+ netdataconfig:
+ netdatalib:
+ netdatacache:
+```
+
+Sample `docker run`
+```bash
+docker run -d --name=netdata \
+ -p 19999:19999 \
+ -e NETDATA_EXTRA_APK_PACKAGES=gcompat \
+ -v netdataconfig:/etc/netdata \
+ -v netdatalib:/var/lib/netdata \
+ -v netdatacache:/var/cache/netdata \
+ -v /etc/passwd:/host/etc/passwd:ro \
+ -v /etc/group:/host/etc/group:ro \
+ -v /proc:/host/proc:ro \
+ -v /sys:/host/sys:ro \
+ -v /etc/os-release:/host/etc/os-release:ro \
+ --restart unless-stopped \
+ --cap-add SYS_PTRACE \
+ --security-opt apparmor=unconfined \
+ --gpus all \
+ netdata/netdata
+```
+
+### Docker Troubleshooting
+
+To troubleshoot `nvidia-smi` in a docker container, first confirm that `nvidia-smi` is working on the host system. If that is working correctly, run `docker exec -it netdata nvidia-smi` to confirm it's working within the docker container. If `nvidia-smi` is functioning both inside and outside of the container, confirm that `nvidia_smi: yes` is uncommented in `python.d.conf`.
+```bash
+docker exec -it netdata bash
+cd /etc/netdata
+./edit-config python.d.conf
+```
diff --git a/collectors/python.d.plugin/nvidia_smi/metadata.yaml b/collectors/python.d.plugin/nvidia_smi/metadata.yaml
new file mode 100644
index 00000000..9bf1e6ca
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/metadata.yaml
@@ -0,0 +1,166 @@
+# This collector will not appear in documentation, as the go version is preferred,
+# https://github.com/netdata/go.d.plugin/blob/master/modules/nvidia_smi/README.md
+#
+# meta:
+# plugin_name: python.d.plugin
+# module_name: nvidia_smi
+# monitored_instance:
+# name: python.d nvidia_smi
+# link: ''
+# categories: []
+# icon_filename: ''
+# related_resources:
+# integrations:
+# list: []
+# info_provided_to_referring_integrations:
+# description: ''
+# keywords: []
+# most_popular: false
+# overview:
+# data_collection:
+# metrics_description: ''
+# method_description: ''
+# supported_platforms:
+# include: []
+# exclude: []
+# multi_instance: true
+# additional_permissions:
+# description: ''
+# default_behavior:
+# auto_detection:
+# description: ''
+# limits:
+# description: ''
+# performance_impact:
+# description: ''
+# setup:
+# prerequisites:
+# list: []
+# configuration:
+# file:
+# name: ''
+# description: ''
+# options:
+# description: ''
+# folding:
+# title: ''
+# enabled: true
+# list: []
+# examples:
+# folding:
+# enabled: true
+# title: ''
+# list: []
+# troubleshooting:
+# problems:
+# list: []
+# alerts: []
+# metrics:
+# folding:
+# title: Metrics
+# enabled: false
+# description: ""
+# availability: []
+# scopes:
+# - name: GPU
+# description: ""
+# labels: []
+# metrics:
+# - name: nvidia_smi.pci_bandwidth
+# description: PCI Express Bandwidth Utilization
+# unit: "KiB/s"
+# chart_type: area
+# dimensions:
+# - name: rx
+# - name: tx
+# - name: nvidia_smi.pci_bandwidth_percent
+# description: PCI Express Bandwidth Percent
+# unit: "percentage"
+# chart_type: area
+# dimensions:
+# - name: rx_percent
+# - name: tx_percent
+# - name: nvidia_smi.fan_speed
+# description: Fan Speed
+# unit: "percentage"
+# chart_type: line
+# dimensions:
+# - name: speed
+# - name: nvidia_smi.gpu_utilization
+# description: GPU Utilization
+# unit: "percentage"
+# chart_type: line
+# dimensions:
+# - name: utilization
+# - name: nvidia_smi.mem_utilization
+# description: Memory Bandwidth Utilization
+# unit: "percentage"
+# chart_type: line
+# dimensions:
+# - name: utilization
+# - name: nvidia_smi.encoder_utilization
+# description: Encoder/Decoder Utilization
+# unit: "percentage"
+# chart_type: line
+# dimensions:
+# - name: encoder
+# - name: decoder
+# - name: nvidia_smi.memory_allocated
+# description: Memory Usage
+# unit: "MiB"
+# chart_type: stacked
+# dimensions:
+# - name: free
+# - name: used
+# - name: nvidia_smi.bar1_memory_usage
+# description: Bar1 Memory Usage
+# unit: "MiB"
+# chart_type: stacked
+# dimensions:
+# - name: free
+# - name: used
+# - name: nvidia_smi.temperature
+# description: Temperature
+# unit: "celsius"
+# chart_type: line
+# dimensions:
+# - name: temp
+# - name: nvidia_smi.clocks
+# description: Clock Frequencies
+# unit: "MHz"
+# chart_type: line
+# dimensions:
+# - name: graphics
+# - name: video
+# - name: sm
+# - name: mem
+# - name: nvidia_smi.power
+# description: Power Utilization
+# unit: "Watts"
+# chart_type: line
+# dimensions:
+# - name: power
+# - name: nvidia_smi.power_state
+# description: Power State
+# unit: "state"
+# chart_type: line
+# dimensions:
+# - name: a dimension per {power_state}
+# - name: nvidia_smi.processes_mem
+# description: Memory Used by Each Process
+# unit: "MiB"
+# chart_type: stacked
+# dimensions:
+# - name: a dimension per process
+# - name: nvidia_smi.user_mem
+# description: Memory Used by Each User
+# unit: "MiB"
+# chart_type: stacked
+# dimensions:
+# - name: a dimension per user
+# - name: nvidia_smi.user_num
+# description: Number of User on GPU
+# unit: "num"
+# chart_type: line
+# dimensions:
+# - name: users
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
new file mode 100644
index 00000000..556a6143
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
@@ -0,0 +1,651 @@
+# -*- coding: utf-8 -*-
+# Description: nvidia-smi netdata python.d module
+# Original Author: Steven Noonan (tycho)
+# Author: Ilya Mashchenko (ilyam8)
+# User Memory Stat Author: Guido Scatena (scatenag)
+
+import os
+import pwd
+import subprocess
+import threading
+import xml.etree.ElementTree as et
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
+
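+# the module is disabled by default; enable it explicitly in python.d.conf (nvidia_smi: yes)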
+disabled_by_default = True
+
+NVIDIA_SMI = 'nvidia-smi'
+
+NOT_AVAILABLE = 'N/A'
+
+EMPTY_ROW = ''
+EMPTY_ROW_LIMIT = 500
+POLLER_BREAK_ROW = '</nvidia_smi_log>'
+
+PCI_BANDWIDTH = 'pci_bandwidth'
+PCI_BANDWIDTH_PERCENT = 'pci_bandwidth_percent'
+FAN_SPEED = 'fan_speed'
+GPU_UTIL = 'gpu_utilization'
+MEM_UTIL = 'mem_utilization'
+ENCODER_UTIL = 'encoder_utilization'
+MEM_USAGE = 'mem_usage'
+BAR_USAGE = 'bar1_mem_usage'
+TEMPERATURE = 'temperature'
+CLOCKS = 'clocks'
+POWER = 'power'
+POWER_STATE = 'power_state'
+PROCESSES_MEM = 'processes_mem'
+USER_MEM = 'user_mem'
+USER_NUM = 'user_num'
+
+ORDER = [
+ PCI_BANDWIDTH,
+ PCI_BANDWIDTH_PERCENT,
+ FAN_SPEED,
+ GPU_UTIL,
+ MEM_UTIL,
+ ENCODER_UTIL,
+ MEM_USAGE,
+ BAR_USAGE,
+ TEMPERATURE,
+ CLOCKS,
+ POWER,
+ POWER_STATE,
+ PROCESSES_MEM,
+ USER_MEM,
+ USER_NUM,
+]
+
+# https://docs.nvidia.com/gameworks/content/gameworkslibrary/coresdk/nvapi/group__gpupstate.html
+POWER_STATES = ['P' + str(i) for i in range(0, 16)]
+
+# PCI Transfer data rate in gigabits per second (Gb/s) per generation
+PCI_SPEED = {
+ "1": 2.5,
+ "2": 5,
+ "3": 8,
+ "4": 16,
+ "5": 32
+}
+# PCI encoding per generation
+PCI_ENCODING = {
+ "1": 2 / 10,
+ "2": 2 / 10,
+ "3": 2 / 130,
+ "4": 2 / 130,
+ "5": 2 / 130
+}
+
+
+def gpu_charts(gpu):
+ fam = gpu.full_name()
+
+ charts = {
+ PCI_BANDWIDTH: {
+ 'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
+ 'lines': [
+ ['rx_util', 'rx', 'absolute', 1, 1],
+ ['tx_util', 'tx', 'absolute', 1, -1],
+ ]
+ },
+ PCI_BANDWIDTH_PERCENT: {
+ 'options': [None, 'PCI Express Bandwidth Percent', 'percentage', fam, 'nvidia_smi.pci_bandwidth_percent',
+ 'area'],
+ 'lines': [
+ ['rx_util_percent', 'rx_percent'],
+ ['tx_util_percent', 'tx_percent'],
+ ]
+ },
+ FAN_SPEED: {
+ 'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
+ 'lines': [
+ ['fan_speed', 'speed'],
+ ]
+ },
+ GPU_UTIL: {
+ 'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'],
+ 'lines': [
+ ['gpu_util', 'utilization'],
+ ]
+ },
+ MEM_UTIL: {
+ 'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'],
+ 'lines': [
+ ['memory_util', 'utilization'],
+ ]
+ },
+ ENCODER_UTIL: {
+ 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization',
+ 'line'],
+ 'lines': [
+ ['encoder_util', 'encoder'],
+ ['decoder_util', 'decoder'],
+ ]
+ },
+ MEM_USAGE: {
+ 'options': [None, 'Memory Usage', 'MiB', fam, 'nvidia_smi.memory_allocated', 'stacked'],
+ 'lines': [
+ ['fb_memory_free', 'free'],
+ ['fb_memory_used', 'used'],
+ ]
+ },
+ BAR_USAGE: {
+ 'options': [None, 'Bar1 Memory Usage', 'MiB', fam, 'nvidia_smi.bar1_memory_usage', 'stacked'],
+ 'lines': [
+ ['bar1_memory_free', 'free'],
+ ['bar1_memory_used', 'used'],
+ ]
+ },
+ TEMPERATURE: {
+ 'options': [None, 'Temperature', 'celsius', fam, 'nvidia_smi.temperature', 'line'],
+ 'lines': [
+ ['gpu_temp', 'temp'],
+ ]
+ },
+ CLOCKS: {
+ 'options': [None, 'Clock Frequencies', 'MHz', fam, 'nvidia_smi.clocks', 'line'],
+ 'lines': [
+ ['graphics_clock', 'graphics'],
+ ['video_clock', 'video'],
+ ['sm_clock', 'sm'],
+ ['mem_clock', 'mem'],
+ ]
+ },
+ POWER: {
+ 'options': [None, 'Power Utilization', 'Watts', fam, 'nvidia_smi.power', 'line'],
+ 'lines': [
+ ['power_draw', 'power', 'absolute', 1, 100],
+ ]
+ },
+ POWER_STATE: {
+ 'options': [None, 'Power State', 'state', fam, 'nvidia_smi.power_state', 'line'],
+ 'lines': [['power_state_' + v.lower(), v, 'absolute'] for v in POWER_STATES]
+ },
+ PROCESSES_MEM: {
+ 'options': [None, 'Memory Used by Each Process', 'MiB', fam, 'nvidia_smi.processes_mem', 'stacked'],
+ 'lines': []
+ },
+ USER_MEM: {
+ 'options': [None, 'Memory Used by Each User', 'MiB', fam, 'nvidia_smi.user_mem', 'stacked'],
+ 'lines': []
+ },
+ USER_NUM: {
+ 'options': [None, 'Number of User on GPU', 'num', fam, 'nvidia_smi.user_num', 'line'],
+ 'lines': [
+ ['user_num', 'users'],
+ ]
+ },
+ }
+
+ idx = gpu.num
+
+ order = ['gpu{0}_{1}'.format(idx, v) for v in ORDER]
+ charts = dict(('gpu{0}_{1}'.format(idx, k), v) for k, v in charts.items())
+
+ for chart in charts.values():
+ for line in chart['lines']:
+ line[0] = 'gpu{0}_{1}'.format(idx, line[0])
+
+ return order, charts
+
+
+class NvidiaSMI:
+ def __init__(self):
+ self.command = find_binary(NVIDIA_SMI)
+ self.active_proc = None
+
+ def run_once(self):
+ proc = subprocess.Popen([self.command, '-x', '-q'], stdout=subprocess.PIPE)
+ stdout, _ = proc.communicate()
+ return stdout
+
+ def run_loop(self, interval):
+ if self.active_proc:
+ self.kill()
+ proc = subprocess.Popen([self.command, '-x', '-q', '-l', str(interval)], stdout=subprocess.PIPE)
+ self.active_proc = proc
+ return proc.stdout
+
+ def kill(self):
+ if self.active_proc:
+ self.active_proc.kill()
+ self.active_proc = None
+
+
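+# Background thread that keeps `nvidia-smi -x -q -l <interval>` running and stores the most
+# recently completed XML document (everything up to </nvidia_smi_log>) for the service to read.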
+class NvidiaSMIPoller(threading.Thread):
+ def __init__(self, poll_interval):
+ threading.Thread.__init__(self)
+ self.daemon = True
+
+ self.smi = NvidiaSMI()
+ self.interval = poll_interval
+
+ self.lock = threading.RLock()
+ self.last_data = str()
+ self.exit = False
+ self.empty_rows = 0
+ self.rows = list()
+
+ def has_smi(self):
+ return bool(self.smi.command)
+
+ def run_once(self):
+ return self.smi.run_once()
+
+ def run(self):
+ out = self.smi.run_loop(self.interval)
+
+ for row in out:
+ if self.exit or self.empty_rows > EMPTY_ROW_LIMIT:
+ break
+ self.process_row(row)
+ self.smi.kill()
+
+ def process_row(self, row):
+ row = row.decode()
+ self.empty_rows += (row == EMPTY_ROW)
+ self.rows.append(row)
+
+ if POLLER_BREAK_ROW in row:
+ self.lock.acquire()
+ self.last_data = '\n'.join(self.rows)
+ self.lock.release()
+
+ self.rows = list()
+ self.empty_rows = 0
+
+ def is_started(self):
+ return self.ident is not None
+
+ def shutdown(self):
+ self.exit = True
+
+ def data(self):
+ self.lock.acquire()
+ data = self.last_data
+ self.lock.release()
+ return data
+
+
+def handle_attr_error(method):
+ def on_call(*args, **kwargs):
+ try:
+ return method(*args, **kwargs)
+ except AttributeError:
+ return None
+
+ return on_call
+
+
+def handle_value_error(method):
+ def on_call(*args, **kwargs):
+ try:
+ return method(*args, **kwargs)
+ except ValueError:
+ return None
+
+ return on_call
+
+
+HOST_PREFIX = os.getenv('NETDATA_HOST_PREFIX')
+ETC_PASSWD_PATH = '/etc/passwd'
+PROC_PATH = '/proc'
+
+IS_INSIDE_DOCKER = False
+
+if HOST_PREFIX:
+ ETC_PASSWD_PATH = os.path.join(HOST_PREFIX, ETC_PASSWD_PATH[1:])
+ PROC_PATH = os.path.join(HOST_PREFIX, PROC_PATH[1:])
+ IS_INSIDE_DOCKER = True
+
+
+def read_passwd_file():
+ data = dict()
+ with open(ETC_PASSWD_PATH, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith("#"):
+ continue
+ fields = line.split(":")
+ # name, passwd, uid, gid, comment, home_dir, shell
+ if len(fields) != 7:
+ continue
+ # uid, guid
+ fields[2], fields[3] = int(fields[2]), int(fields[3])
+ data[fields[2]] = fields
+ return data
+
+
+def read_passwd_file_safe():
+ try:
+ if IS_INSIDE_DOCKER:
+ return read_passwd_file()
+ return dict((k[2], k) for k in pwd.getpwall())
+ except (OSError, IOError):
+ return dict()
+
+
+def get_username_by_pid_safe(pid, passwd_file):
+ path = os.path.join(PROC_PATH, pid)
+ try:
+ uid = os.stat(path).st_uid
+ except (OSError, IOError):
+ return ''
+ try:
+ if IS_INSIDE_DOCKER:
+ return passwd_file[uid][0]
+ return pwd.getpwuid(uid)[0]
+ except KeyError:
+ return str(uid)
+
+
+class GPU:
+ def __init__(self, num, root, exclude_zero_memory_users=False):
+ self.num = num
+ self.root = root
+ self.exclude_zero_memory_users = exclude_zero_memory_users
+
+ def id(self):
+ return self.root.get('id')
+
+ def name(self):
+ return self.root.find('product_name').text
+
+ def full_name(self):
+ return 'gpu{0} {1}'.format(self.num, self.name())
+
+ @handle_attr_error
+ def pci_link_gen(self):
+ return self.root.find('pci').find('pci_gpu_link_info').find('pcie_gen').find('max_link_gen').text
+
+ @handle_attr_error
+ def pci_link_width(self):
+ info = self.root.find('pci').find('pci_gpu_link_info')
+ return info.find('link_widths').find('max_link_width').text.split('x')[0]
+
+ def pci_bw_max(self):
+ link_gen = self.pci_link_gen()
+ link_width = int(self.pci_link_width())
+ if link_gen not in PCI_SPEED or link_gen not in PCI_ENCODING or not link_width:
+ return None
+ # Maximum PCIe Bandwidth = SPEED * WIDTH * (1 - ENCODING) - 1Gb/s.
+ # see details https://enterprise-support.nvidia.com/s/article/understanding-pcie-configuration-for-maximum-performance
+ # return max bandwidth in kilobytes per second (kB/s)
+ return (PCI_SPEED[link_gen] * link_width * (1 - PCI_ENCODING[link_gen]) - 1) * 1000 * 1000 / 8
+
+ @handle_attr_error
+ def rx_util(self):
+ return self.root.find('pci').find('rx_util').text.split()[0]
+
+ @handle_attr_error
+ def tx_util(self):
+ return self.root.find('pci').find('tx_util').text.split()[0]
+
+ @handle_attr_error
+ def fan_speed(self):
+ return self.root.find('fan_speed').text.split()[0]
+
+ @handle_attr_error
+ def gpu_util(self):
+ return self.root.find('utilization').find('gpu_util').text.split()[0]
+
+ @handle_attr_error
+ def memory_util(self):
+ return self.root.find('utilization').find('memory_util').text.split()[0]
+
+ @handle_attr_error
+ def encoder_util(self):
+ return self.root.find('utilization').find('encoder_util').text.split()[0]
+
+ @handle_attr_error
+ def decoder_util(self):
+ return self.root.find('utilization').find('decoder_util').text.split()[0]
+
+ @handle_attr_error
+ def fb_memory_used(self):
+ return self.root.find('fb_memory_usage').find('used').text.split()[0]
+
+ @handle_attr_error
+ def fb_memory_free(self):
+ return self.root.find('fb_memory_usage').find('free').text.split()[0]
+
+ @handle_attr_error
+ def bar1_memory_used(self):
+ return self.root.find('bar1_memory_usage').find('used').text.split()[0]
+
+ @handle_attr_error
+ def bar1_memory_free(self):
+ return self.root.find('bar1_memory_usage').find('free').text.split()[0]
+
+ @handle_attr_error
+ def temperature(self):
+ return self.root.find('temperature').find('gpu_temp').text.split()[0]
+
+ @handle_attr_error
+ def graphics_clock(self):
+ return self.root.find('clocks').find('graphics_clock').text.split()[0]
+
+ @handle_attr_error
+ def video_clock(self):
+ return self.root.find('clocks').find('video_clock').text.split()[0]
+
+ @handle_attr_error
+ def sm_clock(self):
+ return self.root.find('clocks').find('sm_clock').text.split()[0]
+
+ @handle_attr_error
+ def mem_clock(self):
+ return self.root.find('clocks').find('mem_clock').text.split()[0]
+
+ @handle_attr_error
+ def power_readings(self):
+ elem = self.root.find('power_readings')
+ return elem if elem else self.root.find('gpu_power_readings')
+
+ @handle_attr_error
+ def power_state(self):
+ return str(self.power_readings().find('power_state').text.split()[0])
+
+ @handle_value_error
+ @handle_attr_error
+ def power_draw(self):
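+        # power_draw is reported in watts as a float; scale by 100 so the integer sample keeps
+        # two decimal places (the chart dimension divides by 100 to display watts again)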
+ return float(self.power_readings().find('power_draw').text.split()[0]) * 100
+
+ @handle_attr_error
+ def processes(self):
+ processes_info = self.root.find('processes').findall('process_info')
+ if not processes_info:
+ return list()
+
+ passwd_file = read_passwd_file_safe()
+ processes = list()
+
+ for info in processes_info:
+ pid = info.find('pid').text
+ processes.append({
+ 'pid': int(pid),
+ 'process_name': info.find('process_name').text,
+ 'used_memory': int(info.find('used_memory').text.split()[0]),
+ 'username': get_username_by_pid_safe(pid, passwd_file),
+ })
+ return processes
+
+ def data(self):
+ data = {
+ 'rx_util': self.rx_util(),
+ 'tx_util': self.tx_util(),
+ 'fan_speed': self.fan_speed(),
+ 'gpu_util': self.gpu_util(),
+ 'memory_util': self.memory_util(),
+ 'encoder_util': self.encoder_util(),
+ 'decoder_util': self.decoder_util(),
+ 'fb_memory_used': self.fb_memory_used(),
+ 'fb_memory_free': self.fb_memory_free(),
+ 'bar1_memory_used': self.bar1_memory_used(),
+ 'bar1_memory_free': self.bar1_memory_free(),
+ 'gpu_temp': self.temperature(),
+ 'graphics_clock': self.graphics_clock(),
+ 'video_clock': self.video_clock(),
+ 'sm_clock': self.sm_clock(),
+ 'mem_clock': self.mem_clock(),
+ 'power_draw': self.power_draw(),
+ }
+
+ if self.rx_util() != NOT_AVAILABLE and self.tx_util() != NOT_AVAILABLE:
+ pci_bw_max = self.pci_bw_max()
+ if not pci_bw_max:
+ data['rx_util_percent'] = 0
+ data['tx_util_percent'] = 0
+ else:
+                data['rx_util_percent'] = str(int(int(self.rx_util()) * 100 / pci_bw_max))
+                data['tx_util_percent'] = str(int(int(self.tx_util()) * 100 / pci_bw_max))
+
+ for v in POWER_STATES:
+ data['power_state_' + v.lower()] = 0
+ p_state = self.power_state()
+ if p_state:
+ data['power_state_' + p_state.lower()] = 1
+
+ processes = self.processes() or []
+ users = set()
+ for p in processes:
+ data['process_mem_{0}'.format(p['pid'])] = p['used_memory']
+ if p['username']:
+ if self.exclude_zero_memory_users and p['used_memory'] == 0:
+ continue
+ users.add(p['username'])
+ key = 'user_mem_{0}'.format(p['username'])
+ if key in data:
+ data[key] += p['used_memory']
+ else:
+ data[key] = p['used_memory']
+ data['user_num'] = len(users)
+
+ return dict(('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items())
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.loop_mode = configuration.get('loop_mode', True)
+ poll = int(configuration.get('poll_seconds', self.get_update_every()))
+ self.exclude_zero_memory_users = configuration.get('exclude_zero_memory_users', False)
+ self.poller = NvidiaSMIPoller(poll)
+
+ def get_data_loop_mode(self):
+ if not self.poller.is_started():
+ self.poller.start()
+
+ if not self.poller.is_alive():
+ self.debug('poller is off')
+ return None
+
+ return self.poller.data()
+
+ def get_data_normal_mode(self):
+ return self.poller.run_once()
+
+ def get_data(self):
+ if self.loop_mode:
+ last_data = self.get_data_loop_mode()
+ else:
+ last_data = self.get_data_normal_mode()
+
+ if not last_data:
+ return None
+
+ parsed = self.parse_xml(last_data)
+ if parsed is None:
+ return None
+
+ data = dict()
+ for idx, root in enumerate(parsed.findall('gpu')):
+ gpu = GPU(idx, root, self.exclude_zero_memory_users)
+ gpu_data = gpu.data()
+ # self.debug(gpu_data)
+ gpu_data = dict((k, v) for k, v in gpu_data.items() if is_gpu_data_value_valid(v))
+ data.update(gpu_data)
+ self.update_processes_mem_chart(gpu)
+ self.update_processes_user_mem_chart(gpu)
+
+ return data or None
+
+ def update_processes_mem_chart(self, gpu):
+ ps = gpu.processes()
+ if not ps:
+ return
+ chart = self.charts['gpu{0}_{1}'.format(gpu.num, PROCESSES_MEM)]
+ active_dim_ids = []
+ for p in ps:
+ dim_id = 'gpu{0}_process_mem_{1}'.format(gpu.num, p['pid'])
+ active_dim_ids.append(dim_id)
+ if dim_id not in chart:
+ chart.add_dimension([dim_id, '{0} {1}'.format(p['pid'], p['process_name'])])
+ for dim in chart:
+ if dim.id not in active_dim_ids:
+ chart.del_dimension(dim.id, hide=False)
+
+ def update_processes_user_mem_chart(self, gpu):
+ ps = gpu.processes()
+ if not ps:
+ return
+ chart = self.charts['gpu{0}_{1}'.format(gpu.num, USER_MEM)]
+ active_dim_ids = []
+ for p in ps:
+ if not p.get('username'):
+ continue
+ dim_id = 'gpu{0}_user_mem_{1}'.format(gpu.num, p['username'])
+ active_dim_ids.append(dim_id)
+ if dim_id not in chart:
+ chart.add_dimension([dim_id, '{0}'.format(p['username'])])
+
+ for dim in chart:
+ if dim.id not in active_dim_ids:
+ chart.del_dimension(dim.id, hide=False)
+
+ def check(self):
+ if not self.poller.has_smi():
+ self.error("couldn't find '{0}' binary".format(NVIDIA_SMI))
+ return False
+
+ raw_data = self.poller.run_once()
+ if not raw_data:
+ self.error("failed to invoke '{0}' binary".format(NVIDIA_SMI))
+ return False
+
+ parsed = self.parse_xml(raw_data)
+ if parsed is None:
+ return False
+
+ gpus = parsed.findall('gpu')
+ if not gpus:
+ return False
+
+ self.create_charts(gpus)
+
+ return True
+
+ def parse_xml(self, data):
+ try:
+ return et.fromstring(data)
+ except et.ParseError as error:
+ self.error('xml parse failed: "{0}", error: {1}'.format(data, error))
+
+ return None
+
+ def create_charts(self, gpus):
+ for idx, root in enumerate(gpus):
+ order, charts = gpu_charts(GPU(idx, root))
+ self.order.extend(order)
+ self.definitions.update(charts)
+
+
+def is_gpu_data_value_valid(value):
+ try:
+ int(value)
+ except (TypeError, ValueError):
+ return False
+ return True
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
new file mode 100644
index 00000000..3d2a30d4
--- /dev/null
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for nvidia_smi
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, nvidia_smi also supports the following:
+#
+# loop_mode: yes/no # default is yes. If set to yes `nvidia-smi` is executed in a separate thread using `-l` option.
+# poll_seconds: SECONDS # default is 1. Sets the frequency of seconds the nvidia-smi tool is polled in loop mode.
+# exclude_zero_memory_users: yes/no # default is no. Whether to collect users metrics with 0Mb memory allocation.
+#
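+# For example (commented out; the values shown match the documented defaults):
+#
+# loop_mode: yes
+# poll_seconds: 1
+# exclude_zero_memory_users: no
+#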
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/openldap/Makefile.inc b/collectors/python.d.plugin/openldap/Makefile.inc
new file mode 100644
index 00000000..dc947e21
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += openldap/openldap.chart.py
+dist_pythonconfig_DATA += openldap/openldap.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += openldap/README.md openldap/Makefile.inc
+
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
new file mode 120000
index 00000000..45f36b9b
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/README.md
@@ -0,0 +1 @@
+integrations/openldap.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/openldap/integrations/openldap.md b/collectors/python.d.plugin/openldap/integrations/openldap.md
new file mode 100644
index 00000000..a9480a49
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/integrations/openldap.md
@@ -0,0 +1,215 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/openldap/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/openldap/metadata.yaml"
+sidebar_label: "OpenLDAP"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenLDAP
+
+
+<img src="https://netdata.cloud/img/statsd.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: openldap
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors OpenLDAP metrics about connections, operations, referrals and more.
+
+Statistics are taken from the monitoring interface of an OpenLDAP (slapd) server.
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This collector doesn't work until all of the prerequisites are met.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per OpenLDAP instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| openldap.total_connections | connections | connections/s |
+| openldap.traffic_stats | sent | KiB/s |
+| openldap.operations_status | completed, initiated | ops/s |
+| openldap.referrals | sent | referrals/s |
+| openldap.entries | sent | entries/s |
+| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |
+| openldap.waiters | write, read | waiters/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure the openLDAP server to expose metrics to monitor it.
+
+Follow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.
+
+
+#### Install python-ldap module
+
+Install the python-ldap module using one of the following package managers:
+
+1. From pip package manager
+
+```bash
+pip install python-ldap
+```
+
+2. With apt package manager (in most deb based distros)
+
+
+```bash
+apt-get install python-ldap
+```
+
+
+3. With yum package manager (in most rpm based distros)
+
+
+```bash
+yum install python-ldap
+```
+
+
+#### Insert credentials for Netdata to access openLDAP server
+
+Use the `ldappasswd` utility to set a password for the username you will use.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/openldap.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/openldap.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| username | The bind user with rights to access the monitor statistics | | yes |
+| password | The password for the bind user | | yes |
+| server | The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is published. | | yes |
+| port | The listening port of the LDAP server. Change to port 636 in case of a TLS connection. | 389 | yes |
+| use_tls | Set to True if a TLS connection is used over ldaps:// | no | no |
+| use_start_tls | Set to True if a STARTTLS connection is used over ldap:// | no | no |
+| cert_check | Set to False to skip the certificate check | True | yes |
+| timeout | Seconds to wait before timing out if no connection exists | | yes |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+username: "cn=admin"
+password: "pass"
+server: "localhost"
+port: "389"
+cert_check: True
+timeout: 1
+
+```
+
+
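+##### TLS connection
+
+An illustrative variant of the basic example for an `ldaps://` connection, based on the options described above (the credentials and hostname are placeholders):
+
+```yaml
+username: "cn=admin"
+password: "pass"
+server: "ldap.example.com"
+port: "636"
+use_tls: True
+cert_check: True
+timeout: 1
+```
+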
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin openldap debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/openldap/metadata.yaml b/collectors/python.d.plugin/openldap/metadata.yaml
new file mode 100644
index 00000000..3826b22c
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/metadata.yaml
@@ -0,0 +1,225 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: openldap
+ monitored_instance:
+ name: OpenLDAP
+ link: "https://www.openldap.org/"
+ categories:
+ - data-collection.authentication-and-authorization
+ icon_filename: "statsd.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - openldap
+ - RBAC
+ - Directory access
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors OpenLDAP metrics about connections, operations, referrals and more."
+ method_description: |
+          Statistics are taken from the monitoring interface of an OpenLDAP (slapd) server.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+            This collector doesn't work until all of the prerequisites are met.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Configure the openLDAP server to expose metrics to monitor it.
+ description: |
+              Follow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.
+ - title: Install python-ldap module
+ description: |
+              Install the python-ldap module using one of the following package managers:
+
+ 1. From pip package manager
+
+ ```bash
+              pip install python-ldap
+ ```
+
+ 2. With apt package manager (in most deb based distros)
+
+
+ ```bash
+ apt-get install python-ldap
+ ```
+
+
+ 3. With yum package manager (in most rpm based distros)
+
+
+ ```bash
+ yum install python-ldap
+ ```
+ - title: Insert credentials for Netdata to access openLDAP server
+ description: |
+ Use the `ldappasswd` utility to set a password for the username you will use.
+ configuration:
+ file:
+ name: "python.d/openldap.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: username
+              description: The bind user with rights to access the monitor statistics
+ default_value: ""
+ required: true
+ - name: password
+              description: The password for the bind user
+ default_value: ""
+ required: true
+ - name: server
+              description: The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is published.
+ default_value: ""
+ required: true
+ - name: port
+              description: The listening port of the LDAP server. Change to port 636 in case of a TLS connection.
+ default_value: "389"
+ required: true
+ - name: use_tls
+              description: Set to True if a TLS connection is used over ldaps://
+ default_value: False
+ required: false
+ - name: use_start_tls
+              description: Set to True if a STARTTLS connection is used over ldap://
+ default_value: False
+ required: false
+ - name: cert_check
+              description: Set to False to skip the certificate check
+ default_value: "True"
+ required: true
+ - name: timeout
+              description: Seconds to wait before timing out if no connection exists
+ default_value: ""
+ required: true
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ username: "cn=admin"
+ password: "pass"
+ server: "localhost"
+ port: "389"
+              cert_check: True
+ timeout: 1
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: openldap.total_connections
+ description: Total Connections
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: openldap.traffic_stats
+ description: Traffic
+ unit: "KiB/s"
+ chart_type: line
+ dimensions:
+ - name: sent
+ - name: openldap.operations_status
+ description: Operations Status
+ unit: "ops/s"
+ chart_type: line
+ dimensions:
+ - name: completed
+ - name: initiated
+ - name: openldap.referrals
+ description: Referrals
+ unit: "referrals/s"
+ chart_type: line
+ dimensions:
+ - name: sent
+ - name: openldap.entries
+ description: Entries
+ unit: "entries/s"
+ chart_type: line
+ dimensions:
+ - name: sent
+ - name: openldap.ldap_operations
+ description: Operations
+ unit: "ops/s"
+ chart_type: line
+ dimensions:
+ - name: bind
+ - name: search
+ - name: unbind
+ - name: add
+ - name: delete
+ - name: modify
+ - name: compare
+ - name: openldap.waiters
+ description: Waiters
+ unit: "waiters/s"
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: read
diff --git a/collectors/python.d.plugin/openldap/openldap.chart.py b/collectors/python.d.plugin/openldap/openldap.chart.py
new file mode 100644
index 00000000..aba14395
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/openldap.chart.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+# Description: openldap netdata python.d module
+# Author: Manolis Kartsonakis (ekartsonakis)
+# SPDX-License-Identifier: GPL-3.0+
+
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ HAS_LDAP = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+DEFAULT_SERVER = 'localhost'
+DEFAULT_PORT = '389'
+DEFAULT_TLS = False
+DEFAULT_CERT_CHECK = True
+DEFAULT_TIMEOUT = 1
+DEFAULT_START_TLS = False
+
+ORDER = [
+ 'total_connections',
+ 'bytes_sent',
+ 'operations',
+ 'referrals_sent',
+ 'entries_sent',
+ 'ldap_operations',
+ 'waiters'
+]
+
+CHARTS = {
+ 'total_connections': {
+ 'options': [None, 'Total Connections', 'connections/s', 'ldap', 'openldap.total_connections', 'line'],
+ 'lines': [
+ ['total_connections', 'connections', 'incremental']
+ ]
+ },
+ 'bytes_sent': {
+ 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'],
+ 'lines': [
+ ['bytes_sent', 'sent', 'incremental', 1, 1024]
+ ]
+ },
+ 'operations': {
+ 'options': [None, 'Operations Status', 'ops/s', 'ldap', 'openldap.operations_status', 'line'],
+ 'lines': [
+ ['completed_operations', 'completed', 'incremental'],
+ ['initiated_operations', 'initiated', 'incremental']
+ ]
+ },
+ 'referrals_sent': {
+ 'options': [None, 'Referrals', 'referrals/s', 'ldap', 'openldap.referrals', 'line'],
+ 'lines': [
+ ['referrals_sent', 'sent', 'incremental']
+ ]
+ },
+ 'entries_sent': {
+ 'options': [None, 'Entries', 'entries/s', 'ldap', 'openldap.entries', 'line'],
+ 'lines': [
+ ['entries_sent', 'sent', 'incremental']
+ ]
+ },
+ 'ldap_operations': {
+ 'options': [None, 'Operations', 'ops/s', 'ldap', 'openldap.ldap_operations', 'line'],
+ 'lines': [
+ ['bind_operations', 'bind', 'incremental'],
+ ['search_operations', 'search', 'incremental'],
+ ['unbind_operations', 'unbind', 'incremental'],
+ ['add_operations', 'add', 'incremental'],
+ ['delete_operations', 'delete', 'incremental'],
+ ['modify_operations', 'modify', 'incremental'],
+ ['compare_operations', 'compare', 'incremental']
+ ]
+ },
+ 'waiters': {
+ 'options': [None, 'Waiters', 'waiters/s', 'ldap', 'openldap.waiters', 'line'],
+ 'lines': [
+ ['write_waiters', 'write', 'incremental'],
+ ['read_waiters', 'read', 'incremental']
+ ]
+ },
+}
+
+# Stuff to gather - tuples of (DN, attribute) to query for each metric
+SEARCH_LIST = {
+ 'total_connections': (
+ 'cn=Total,cn=Connections,cn=Monitor', 'monitorCounter',
+ ),
+ 'bytes_sent': (
+ 'cn=Bytes,cn=Statistics,cn=Monitor', 'monitorCounter',
+ ),
+ 'completed_operations': (
+ 'cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'initiated_operations': (
+ 'cn=Operations,cn=Monitor', 'monitorOpInitiated',
+ ),
+ 'referrals_sent': (
+ 'cn=Referrals,cn=Statistics,cn=Monitor', 'monitorCounter',
+ ),
+ 'entries_sent': (
+ 'cn=Entries,cn=Statistics,cn=Monitor', 'monitorCounter',
+ ),
+ 'bind_operations': (
+ 'cn=Bind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'unbind_operations': (
+ 'cn=Unbind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'add_operations': (
+ 'cn=Add,cn=Operations,cn=Monitor', 'monitorOpInitiated',
+ ),
+ 'delete_operations': (
+ 'cn=Delete,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'modify_operations': (
+ 'cn=Modify,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'compare_operations': (
+ 'cn=Compare,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'search_operations': (
+ 'cn=Search,cn=Operations,cn=Monitor', 'monitorOpCompleted',
+ ),
+ 'write_waiters': (
+ 'cn=Write,cn=Waiters,cn=Monitor', 'monitorCounter',
+ ),
+ 'read_waiters': (
+ 'cn=Read,cn=Waiters,cn=Monitor', 'monitorCounter',
+ ),
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.server = configuration.get('server', DEFAULT_SERVER)
+ self.port = configuration.get('port', DEFAULT_PORT)
+ self.username = configuration.get('username')
+ self.password = configuration.get('password')
+ self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
+ self.use_tls = configuration.get('use_tls', DEFAULT_TLS)
+ self.cert_check = configuration.get('cert_check', DEFAULT_CERT_CHECK)
+ self.use_start_tls = configuration.get('use_start_tls', DEFAULT_START_TLS)
+ self.alive = False
+ self.conn = None
+
+ def disconnect(self):
+ if self.conn:
+ self.conn.unbind()
+ self.conn = None
+ self.alive = False
+
+ def connect(self):
+ try:
+ if self.use_tls:
+ self.conn = ldap.initialize('ldaps://%s:%s' % (self.server, self.port))
+ else:
+ self.conn = ldap.initialize('ldap://%s:%s' % (self.server, self.port))
+ self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, self.timeout)
+ if (self.use_tls or self.use_start_tls) and not self.cert_check:
+ self.conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+ if self.use_start_tls or self.use_tls:
+ self.conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
+ if self.use_start_tls:
+ self.conn.protocol_version = ldap.VERSION3
+ self.conn.start_tls_s()
+ if self.username and self.password:
+ self.conn.simple_bind(self.username, self.password)
+ except ldap.LDAPError as error:
+ self.error(error)
+ return False
+
+ self.alive = True
+ return True
+
+ def reconnect(self):
+ self.disconnect()
+ return self.connect()
+
+ def check(self):
+ if not HAS_LDAP:
+ self.error("'python-ldap' package is needed")
+ return None
+
+ return self.connect() and self.get_data()
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ data = dict()
+ for key in SEARCH_LIST:
+ dn = SEARCH_LIST[key][0]
+ attr = SEARCH_LIST[key][1]
+ try:
+ num = self.conn.search(dn, ldap.SCOPE_BASE, 'objectClass=*', [attr, ])
+ result_type, result_data = self.conn.result(num, 1)
+ except ldap.LDAPError as error:
+ self.error("Empty result. Check bind username/password. Message: ", error)
+ self.alive = False
+ return None
+
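+            # 101 == ldap.RES_SEARCH_RESULT; skip anything that is not a completed search result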
+ if result_type != 101:
+ continue
+
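+            # result_data has the form [(dn, {attribute: [value, ...]})]; take the first value
+            # of the single requested attribute and convert it to an integer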
+ try:
+ data[key] = int(list(result_data[0][1].values())[0][0])
+ except (ValueError, IndexError) as error:
+ self.debug(error)
+ continue
+
+ return data
diff --git a/collectors/python.d.plugin/openldap/openldap.conf b/collectors/python.d.plugin/openldap/openldap.conf
new file mode 100644
index 00000000..5fd99a52
--- /dev/null
+++ b/collectors/python.d.plugin/openldap/openldap.conf
@@ -0,0 +1,75 @@
+# netdata python.d.plugin configuration for openldap
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# openldap is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# ----------------------------------------------------------------------
+# OPENLDAP EXTRA PARAMETERS
+
+# Set here your LDAP connection settings
+
+#username : "cn=admin,dc=example,dc=com" # The bind user with rights to access monitor statistics
+#password : "yourpass" # The password for the bind user
+#server : 'localhost' # The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is published.
+#port : 389 # The listening port of the LDAP server. Change to port 636 in case of a TLS connection
+#use_tls : False # Set to True if a TLS connection is used over ldaps://
+#use_start_tls: False # Set to True if a STARTTLS connection is used over ldap://
+#cert_check : True # Set to False to skip the certificate check
+#timeout : 1 # Seconds to timeout if no connection exists
diff --git a/collectors/python.d.plugin/oracledb/Makefile.inc b/collectors/python.d.plugin/oracledb/Makefile.inc
new file mode 100644
index 00000000..ea3a8240
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += oracledb/oracledb.chart.py
+dist_pythonconfig_DATA += oracledb/oracledb.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += oracledb/README.md oracledb/Makefile.inc
+
diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md
new file mode 120000
index 00000000..a75e3611
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/README.md
@@ -0,0 +1 @@
+integrations/oracle_db.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/oracledb/integrations/oracle_db.md b/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
new file mode 100644
index 00000000..30557c02
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
@@ -0,0 +1,226 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/oracledb/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/oracledb/metadata.yaml"
+sidebar_label: "Oracle DB"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Oracle DB
+
+
+<img src="https://netdata.cloud/img/oracle.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: oracledb
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors OracleDB database metrics about sessions, tables, memory and more.
+
+It collects the metrics via the supported database client library
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+In order for this collector to work, it needs a read-only user `netdata` in the RDBMS.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+When the requirements are met, databases on the local host on port 1521 will be auto-detected
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+These metrics refer to the entire monitored application.
+
+### Per Oracle DB instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| oracledb.session_count | total, active | sessions |
+| oracledb.session_limit_usage | usage | % |
+| oracledb.logons | logons | events/s |
+| oracledb.physical_disk_read_writes | reads, writes | events/s |
+| oracledb.sorts_on_disks | sorts | events/s |
+| oracledb.full_table_scans | full table scans | events/s |
+| oracledb.database_wait_time_ratio | wait time ratio | % |
+| oracledb.shared_pool_free_memory | free memory | % |
+| oracledb.in_memory_sorts_ratio | in-memory sorts | % |
+| oracledb.sql_service_response_time | time | seconds |
+| oracledb.user_rollbacks | rollbacks | events/s |
+| oracledb.enqueue_timeouts | enqueue timeouts | events/s |
+| oracledb.cache_hit_ration | buffer, cursor, library, row | % |
+| oracledb.global_cache_blocks | corrupted, lost | events/s |
+| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |
+| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |
+| oracledb.tablespace_size | a dimension per active tablespace | KiB |
+| oracledb.tablespace_usage | a dimension per active tablespace | KiB |
+| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |
+| oracledb.allocated_size | a dimension per active tablespace | B |
+| oracledb.allocated_usage | a dimension per active tablespace | B |
+| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install the python-oracledb package
+
+You can follow the official guide below to install the required package:
+
+Source: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html
+
+
+#### Create a read only user for netdata
+
+Follow the official instructions for your Oracle RDBMS to create a read-only user for Netdata. The operation may follow this approach:
+
+Connect to your Oracle database with an administrative user and execute:
+
+```sql
+CREATE USER netdata IDENTIFIED BY <PASSWORD>;
+
+GRANT CONNECT TO netdata;
+GRANT SELECT_CATALOG_ROLE TO netdata;
+```
+
+
+#### Edit the configuration
+
+Edit the configuration to:
+
+1. Provide a valid user for the netdata collector to access the database.
+2. Specify the network target this database is listening on.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/oracledb.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/oracledb.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| user | The username for the user account. | no | yes |
+| password | The password for the user account. | no | yes |
+| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |
+| service | The Oracle Database service name. To view the services available on your server, run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |
+| protocol | One of the strings "tcp" or "tcps", indicating whether to use unencrypted or encrypted network traffic. | no | yes |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration, two jobs described for two databases.
+
+```yaml
+local:
+ user: 'netdata'
+ password: 'secret'
+ server: 'localhost:1521'
+ service: 'XE'
+ protocol: 'tcps'
+
+remote:
+ user: 'netdata'
+ password: 'secret'
+ server: '10.0.0.1:1521'
+ service: 'XE'
+ protocol: 'tcps'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin oracledb debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/oracledb/metadata.yaml b/collectors/python.d.plugin/oracledb/metadata.yaml
new file mode 100644
index 00000000..f2ab8312
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/metadata.yaml
@@ -0,0 +1,309 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: oracledb
+ monitored_instance:
+ name: Oracle DB
+ link: "https://docs.oracle.com/en/database/oracle/oracle-database/"
+ categories:
+ - data-collection.database-servers
+ icon_filename: "oracle.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - database
+ - oracle
+ - data warehouse
+ - SQL
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors OracleDB database metrics about sessions, tables, memory and more."
+        method_description: "It collects the metrics via the supported database client library."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: |
+ In order for this collector to work, it needs a read-only user `netdata` in the RDBMS.
+ default_behavior:
+ auto_detection:
+          description: "When the requirements are met, databases on the local host on port 1521 will be auto-detected."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Install the python-oracledb package
+ description: |
+ You can follow the official guide below to install the required package:
+
+ Source: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html
+          - title: Create a read-only user for netdata
+            description: |
+              Follow the official instructions for your Oracle RDBMS to create a read-only user for netdata. The operation may follow this approach:
+
+ Connect to your Oracle database with an administrative user and execute:
+
+              ```sql
+ CREATE USER netdata IDENTIFIED BY <PASSWORD>;
+
+ GRANT CONNECT TO netdata;
+ GRANT SELECT_CATALOG_ROLE TO netdata;
+ ```
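+
+              As a quick sanity check (hypothetical session; any SQL client will do), the new account should be able to read the dynamic performance views the collector relies on:
+
+              ```sql
+              -- run as the netdata user; a few rows and no ORA- errors means the grants are in place
+              SELECT metric_name, value FROM gv$sysmetric WHERE ROWNUM <= 5;
+              ```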
+ - title: Edit the configuration
+ description: |
+              Edit the configuration to:
+
+              1. Provide a valid user for the netdata collector to access the database.
+              2. Specify the network target this database is listening on.
+ configuration:
+ file:
+ name: "python.d/oracledb.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: user
+ description: The username for the user account.
+ default_value: no
+ required: true
+ - name: password
+ description: The password for the user account.
+ default_value: no
+ required: true
+ - name: server
+ description: The IP address or hostname (and port) of the Oracle Database Server.
+ default_value: no
+ required: true
+ - name: service
+              description: The Oracle Database service name. To view the services available on your server, run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`.
+ default_value: no
+ required: true
+ - name: protocol
+              description: One of the strings `tcp` or `tcps`, indicating whether to use unencrypted or encrypted network traffic.
+ default_value: no
+ required: true
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+              description: A basic example configuration with two jobs, one for each of two databases.
+ config: |
+ local:
+ user: 'netdata'
+ password: 'secret'
+ server: 'localhost:1521'
+ service: 'XE'
+ protocol: 'tcps'
+
+ remote:
+ user: 'netdata'
+ password: 'secret'
+ server: '10.0.0.1:1521'
+ service: 'XE'
+ protocol: 'tcps'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: "These metrics refer to the entire monitored application."
+ availability: []
+ scopes:
+ - name: global
+ description: ""
+ labels: []
+ metrics:
+ - name: oracledb.session_count
+ description: Session Count
+ unit: "sessions"
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: active
+ - name: oracledb.session_limit_usage
+ description: Session Limit Usage
+ unit: "%"
+ chart_type: area
+ dimensions:
+ - name: usage
+ - name: oracledb.logons
+ description: Logons
+ unit: "events/s"
+ chart_type: area
+ dimensions:
+ - name: logons
+ - name: oracledb.physical_disk_read_writes
+ description: Physical Disk Reads/Writes
+ unit: "events/s"
+ chart_type: area
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: oracledb.sorts_on_disks
+ description: Sorts On Disk
+ unit: "events/s"
+ chart_type: line
+ dimensions:
+ - name: sorts
+ - name: oracledb.full_table_scans
+ description: Full Table Scans
+ unit: "events/s"
+ chart_type: line
+ dimensions:
+ - name: full table scans
+ - name: oracledb.database_wait_time_ratio
+ description: Database Wait Time Ratio
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: wait time ratio
+ - name: oracledb.shared_pool_free_memory
+ description: Shared Pool Free Memory
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: free memory
+ - name: oracledb.in_memory_sorts_ratio
+ description: In-Memory Sorts Ratio
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: in-memory sorts
+ - name: oracledb.sql_service_response_time
+ description: SQL Service Response Time
+ unit: "seconds"
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: oracledb.user_rollbacks
+ description: User Rollbacks
+ unit: "events/s"
+ chart_type: line
+ dimensions:
+ - name: rollbacks
+ - name: oracledb.enqueue_timeouts
+ description: Enqueue Timeouts
+ unit: "events/s"
+ chart_type: line
+ dimensions:
+ - name: enqueue timeouts
+ - name: oracledb.cache_hit_ration
+ description: Cache Hit Ratio
+ unit: "%"
+ chart_type: stacked
+ dimensions:
+ - name: buffer
+ - name: cursor
+ - name: library
+ - name: row
+ - name: oracledb.global_cache_blocks
+ description: Global Cache Blocks Events
+ unit: "events/s"
+ chart_type: area
+ dimensions:
+ - name: corrupted
+ - name: lost
+ - name: oracledb.activity
+ description: Activities
+ unit: "events/s"
+ chart_type: stacked
+ dimensions:
+ - name: parse count
+ - name: execute count
+ - name: user commits
+ - name: user rollbacks
+ - name: oracledb.wait_time
+ description: Wait Time
+ unit: "ms"
+ chart_type: stacked
+ dimensions:
+ - name: application
+ - name: configuration
+ - name: administrative
+ - name: concurrency
+ - name: commit
+ - name: network
+ - name: user I/O
+ - name: system I/O
+ - name: scheduler
+ - name: other
+ - name: oracledb.tablespace_size
+ description: Size
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: a dimension per active tablespace
+ - name: oracledb.tablespace_usage
+ description: Usage
+ unit: "KiB"
+ chart_type: line
+ dimensions:
+ - name: a dimension per active tablespace
+ - name: oracledb.tablespace_usage_in_percent
+ description: Usage
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: a dimension per active tablespace
+ - name: oracledb.allocated_size
+ description: Size
+ unit: "B"
+ chart_type: line
+ dimensions:
+ - name: a dimension per active tablespace
+ - name: oracledb.allocated_usage
+ description: Usage
+ unit: "B"
+ chart_type: line
+ dimensions:
+ - name: a dimension per active tablespace
+ - name: oracledb.allocated_usage_in_percent
+ description: Usage
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: a dimension per active tablespace
diff --git a/collectors/python.d.plugin/oracledb/oracledb.chart.py b/collectors/python.d.plugin/oracledb/oracledb.chart.py
new file mode 100644
index 00000000..455cf270
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/oracledb.chart.py
@@ -0,0 +1,846 @@
+# -*- coding: utf-8 -*-
+# Description: oracledb netdata python.d module
+# Author: ilyam8 (Ilya Mashchenko)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
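+# Prefer the new python-oracledb driver; fall back to the legacy cx_Oracle package so
+# existing installations keep working. Either one is imported under the name cx_Oracle.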
+try:
+ import oracledb as cx_Oracle
+
+ HAS_ORACLE_NEW = True
+ HAS_ORACLE_OLD = False
+except ImportError:
+ HAS_ORACLE_NEW = False
+ try:
+ import cx_Oracle
+
+ HAS_ORACLE_OLD = True
+ except ImportError:
+ HAS_ORACLE_OLD = False
+
+ORDER = [
+ 'session_count',
+ 'session_limit_usage',
+ 'logons',
+ 'physical_disk_read_write',
+ 'sorts_on_disk',
+ 'full_table_scans',
+ 'database_wait_time_ratio',
+ 'shared_pool_free_memory',
+ 'in_memory_sorts_ratio',
+ 'sql_service_response_time',
+ 'user_rollbacks',
+ 'enqueue_timeouts',
+ 'cache_hit_ratio',
+ 'global_cache_blocks',
+ 'activity',
+ 'wait_time',
+ 'tablespace_size',
+ 'tablespace_usage',
+ 'tablespace_usage_in_percent',
+ 'allocated_size',
+ 'allocated_usage',
+ 'allocated_usage_in_percent',
+]
+
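+# Most collected values are floats; get_data() submits them as integers scaled by 1000,
+# and the chart lines below use a divisor of 1000 to restore the fractional precision.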
+CHARTS = {
+ 'session_count': {
+ 'options': [None, 'Session Count', 'sessions', 'session activity', 'oracledb.session_count', 'line'],
+ 'lines': [
+ ['session_count', 'total', 'absolute', 1, 1000],
+ ['average_active_sessions', 'active', 'absolute', 1, 1000],
+ ]
+ },
+ 'session_limit_usage': {
+ 'options': [None, 'Session Limit Usage', '%', 'session activity', 'oracledb.session_limit_usage', 'area'],
+ 'lines': [
+ ['session_limit_percent', 'usage', 'absolute', 1, 1000],
+ ]
+ },
+ 'logons': {
+ 'options': [None, 'Logons', 'events/s', 'session activity', 'oracledb.logons', 'area'],
+ 'lines': [
+ ['logons_per_sec', 'logons', 'absolute', 1, 1000],
+ ]
+ },
+ 'physical_disk_read_write': {
+ 'options': [None, 'Physical Disk Reads/Writes', 'events/s', 'disk activity',
+ 'oracledb.physical_disk_read_writes', 'area'],
+ 'lines': [
+ ['physical_reads_per_sec', 'reads', 'absolute', 1, 1000],
+ ['physical_writes_per_sec', 'writes', 'absolute', -1, 1000],
+ ]
+ },
+ 'sorts_on_disk': {
+ 'options': [None, 'Sorts On Disk', 'events/s', 'disk activity', 'oracledb.sorts_on_disks', 'line'],
+ 'lines': [
+ ['disk_sort_per_sec', 'sorts', 'absolute', 1, 1000],
+ ]
+ },
+ 'full_table_scans': {
+ 'options': [None, 'Full Table Scans', 'events/s', 'disk activity', 'oracledb.full_table_scans', 'line'],
+ 'lines': [
+ ['long_table_scans_per_sec', 'full table scans', 'absolute', 1, 1000],
+ ]
+ },
+ 'database_wait_time_ratio': {
+ 'options': [None, 'Database Wait Time Ratio', '%', 'database and buffer activity',
+ 'oracledb.database_wait_time_ratio', 'line'],
+ 'lines': [
+ ['database_wait_time_ratio', 'wait time ratio', 'absolute', 1, 1000],
+ ]
+ },
+ 'shared_pool_free_memory': {
+ 'options': [None, 'Shared Pool Free Memory', '%', 'database and buffer activity',
+ 'oracledb.shared_pool_free_memory', 'line'],
+ 'lines': [
+ ['shared_pool_free_percent', 'free memory', 'absolute', 1, 1000],
+ ]
+ },
+ 'in_memory_sorts_ratio': {
+ 'options': [None, 'In-Memory Sorts Ratio', '%', 'database and buffer activity',
+ 'oracledb.in_memory_sorts_ratio', 'line'],
+ 'lines': [
+ ['memory_sorts_ratio', 'in-memory sorts', 'absolute', 1, 1000],
+ ]
+ },
+ 'sql_service_response_time': {
+ 'options': [None, 'SQL Service Response Time', 'seconds', 'database and buffer activity',
+ 'oracledb.sql_service_response_time', 'line'],
+ 'lines': [
+ ['sql_service_response_time', 'time', 'absolute', 1, 1000],
+ ]
+ },
+ 'user_rollbacks': {
+ 'options': [None, 'User Rollbacks', 'events/s', 'database and buffer activity',
+ 'oracledb.user_rollbacks', 'line'],
+ 'lines': [
+ ['user_rollbacks_per_sec', 'rollbacks', 'absolute', 1, 1000],
+ ]
+ },
+ 'enqueue_timeouts': {
+ 'options': [None, 'Enqueue Timeouts', 'events/s', 'database and buffer activity',
+ 'oracledb.enqueue_timeouts', 'line'],
+ 'lines': [
+ ['enqueue_timeouts_per_sec', 'enqueue timeouts', 'absolute', 1, 1000],
+ ]
+ },
+ 'cache_hit_ratio': {
+ 'options': [None, 'Cache Hit Ratio', '%', 'cache', 'oracledb.cache_hit_ration', 'stacked'],
+ 'lines': [
+ ['buffer_cache_hit_ratio', 'buffer', 'absolute', 1, 1000],
+ ['cursor_cache_hit_ratio', 'cursor', 'absolute', 1, 1000],
+ ['library_cache_hit_ratio', 'library', 'absolute', 1, 1000],
+ ['row_cache_hit_ratio', 'row', 'absolute', 1, 1000],
+ ]
+ },
+ 'global_cache_blocks': {
+ 'options': [None, 'Global Cache Blocks Events', 'events/s', 'cache', 'oracledb.global_cache_blocks', 'area'],
+ 'lines': [
+ ['global_cache_blocks_corrupted', 'corrupted', 'incremental', 1, 1000],
+ ['global_cache_blocks_lost', 'lost', 'incremental', 1, 1000],
+ ]
+ },
+ 'activity': {
+ 'options': [None, 'Activities', 'events/s', 'activities', 'oracledb.activity', 'stacked'],
+ 'lines': [
+ ['activity_parse_count_total', 'parse count', 'incremental', 1, 1000],
+ ['activity_execute_count', 'execute count', 'incremental', 1, 1000],
+ ['activity_user_commits', 'user commits', 'incremental', 1, 1000],
+ ['activity_user_rollbacks', 'user rollbacks', 'incremental', 1, 1000],
+ ]
+ },
+ 'wait_time': {
+ 'options': [None, 'Wait Time', 'ms', 'wait time', 'oracledb.wait_time', 'stacked'],
+ 'lines': [
+ ['wait_time_application', 'application', 'absolute', 1, 1000],
+ ['wait_time_configuration', 'configuration', 'absolute', 1, 1000],
+ ['wait_time_administrative', 'administrative', 'absolute', 1, 1000],
+ ['wait_time_concurrency', 'concurrency', 'absolute', 1, 1000],
+ ['wait_time_commit', 'commit', 'absolute', 1, 1000],
+ ['wait_time_network', 'network', 'absolute', 1, 1000],
+ ['wait_time_user_io', 'user I/O', 'absolute', 1, 1000],
+ ['wait_time_system_io', 'system I/O', 'absolute', 1, 1000],
+ ['wait_time_scheduler', 'scheduler', 'absolute', 1, 1000],
+ ['wait_time_other', 'other', 'absolute', 1, 1000],
+ ]
+ },
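+    # The tablespace/allocated charts below start with empty 'lines'; dimensions are
+    # added at runtime by add_tablespace_to_charts() as tablespaces are discovered.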
+ 'tablespace_size': {
+ 'options': [None, 'Size', 'KiB', 'tablespace', 'oracledb.tablespace_size', 'line'],
+ 'lines': [],
+ },
+ 'tablespace_usage': {
+ 'options': [None, 'Usage', 'KiB', 'tablespace', 'oracledb.tablespace_usage', 'line'],
+ 'lines': [],
+ },
+ 'tablespace_usage_in_percent': {
+ 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.tablespace_usage_in_percent', 'line'],
+ 'lines': [],
+ },
+ 'allocated_size': {
+ 'options': [None, 'Size', 'B', 'tablespace', 'oracledb.allocated_size', 'line'],
+ 'lines': [],
+ },
+ 'allocated_usage': {
+ 'options': [None, 'Usage', 'B', 'tablespace', 'oracledb.allocated_usage', 'line'],
+ 'lines': [],
+ },
+ 'allocated_usage_in_percent': {
+ 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.allocated_usage_in_percent', 'line'],
+ 'lines': [],
+ },
+}
+
+CX_CONNECT_STRING_OLD = "{0}/{1}@//{2}/{3}"
+
+QUERY_SYSTEM = '''
+SELECT
+ metric_name,
+ value
+FROM
+ gv$sysmetric
+ORDER BY
+ begin_time
+'''
+QUERY_TABLESPACE = '''
+SELECT
+ m.tablespace_name,
+ m.used_space * t.block_size AS used_bytes,
+ m.tablespace_size * t.block_size AS max_bytes,
+ m.used_percent
+FROM
+ dba_tablespace_usage_metrics m
+ JOIN dba_tablespaces t ON m.tablespace_name = t.tablespace_name
+'''
+QUERY_ALLOCATED = '''
+SELECT
+ nvl(b.tablespace_name,nvl(a.tablespace_name,'UNKNOWN')) tablespace_name,
+ bytes_alloc used_bytes,
+ bytes_alloc-nvl(bytes_free,0) max_bytes,
+ ((bytes_alloc-nvl(bytes_free,0))/ bytes_alloc)*100 used_percent
+FROM
+ (SELECT
+ sum(bytes) bytes_free,
+ tablespace_name
+ FROM sys.dba_free_space
+ GROUP BY tablespace_name
+ ) a,
+ (SELECT
+ sum(bytes) bytes_alloc,
+ tablespace_name
+ FROM sys.dba_data_files
+ GROUP BY tablespace_name
+ ) b
+WHERE a.tablespace_name (+) = b.tablespace_name
+'''
+QUERY_ACTIVITIES_COUNT = '''
+SELECT
+ name,
+ value
+FROM
+ v$sysstat
+WHERE
+ name IN (
+ 'parse count (total)',
+ 'execute count',
+ 'user commits',
+ 'user rollbacks'
+ )
+'''
+QUERY_WAIT_TIME = '''
+SELECT
+ n.wait_class,
+ round(m.time_waited / m.INTSIZE_CSEC, 3)
+FROM
+ v$waitclassmetric m,
+ v$system_wait_class n
+WHERE
+ m.wait_class_id = n.wait_class_id
+ AND n.wait_class != 'Idle'
+'''
+# QUERY_SESSION_COUNT = '''
+# SELECT
+# status,
+# type
+# FROM
+# v$session
+# GROUP BY
+# status,
+# type
+# '''
+# QUERY_PROCESSES_COUNT = '''
+# SELECT
+# COUNT(*)
+# FROM
+# v$process
+# '''
+# QUERY_PROCESS = '''
+# SELECT
+# program,
+# pga_used_mem,
+# pga_alloc_mem,
+# pga_freeable_mem,
+# pga_max_mem
+# FROM
+# gv$process
+# '''
+
+# PROCESS_METRICS = [
+# 'pga_used_memory',
+# 'pga_allocated_memory',
+# 'pga_freeable_memory',
+# 'pga_maximum_memory',
+# ]
+
+
+SYS_METRICS = {
+ 'Average Active Sessions': 'average_active_sessions',
+ 'Session Count': 'session_count',
+ 'Session Limit %': 'session_limit_percent',
+ 'Logons Per Sec': 'logons_per_sec',
+ 'Physical Reads Per Sec': 'physical_reads_per_sec',
+ 'Physical Writes Per Sec': 'physical_writes_per_sec',
+ 'Disk Sort Per Sec': 'disk_sort_per_sec',
+ 'Long Table Scans Per Sec': 'long_table_scans_per_sec',
+ 'Database Wait Time Ratio': 'database_wait_time_ratio',
+ 'Shared Pool Free %': 'shared_pool_free_percent',
+ 'Memory Sorts Ratio': 'memory_sorts_ratio',
+ 'SQL Service Response Time': 'sql_service_response_time',
+ 'User Rollbacks Per Sec': 'user_rollbacks_per_sec',
+ 'Enqueue Timeouts Per Sec': 'enqueue_timeouts_per_sec',
+ 'Buffer Cache Hit Ratio': 'buffer_cache_hit_ratio',
+ 'Cursor Cache Hit Ratio': 'cursor_cache_hit_ratio',
+ 'Library Cache Hit Ratio': 'library_cache_hit_ratio',
+ 'Row Cache Hit Ratio': 'row_cache_hit_ratio',
+ 'Global Cache Blocks Corrupted': 'global_cache_blocks_corrupted',
+ 'Global Cache Blocks Lost': 'global_cache_blocks_lost',
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.user = configuration.get('user')
+ self.password = configuration.get('password')
+ self.server = configuration.get('server')
+ self.service = configuration.get('service')
+ self.protocol = configuration.get('protocol', 'tcps')
+ self.alive = False
+ self.conn = None
+ self.active_tablespaces = set()
+
+ def connect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+ if HAS_ORACLE_NEW:
+ try:
+ self.conn = cx_Oracle.connect(
+ f'{self.user}/{self.password}@{self.protocol}://{self.server}/{self.service}')
+ except cx_Oracle.DatabaseError as error:
+ self.error(error)
+ return False
+ else:
+ try:
+ self.conn = cx_Oracle.connect(
+ CX_CONNECT_STRING_OLD.format(
+ self.user,
+ self.password,
+ self.server,
+ self.service,
+ ))
+ except cx_Oracle.DatabaseError as error:
+ self.error(error)
+ return False
+
+ self.alive = True
+ return True
+
+ def reconnect(self):
+ return self.connect()
+
+ def check(self):
+ if not HAS_ORACLE_NEW and not HAS_ORACLE_OLD:
+ self.error("'oracledb' package is needed to use oracledb module")
+ return False
+
+ if not all([
+ self.user,
+ self.password,
+ self.server,
+ self.service
+ ]):
+ self.error("one of these parameters is not specified: user, password, server, service")
+ return False
+
+ if not self.connect():
+ return False
+
+ return bool(self.get_data())
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ data = dict()
+
+ # SYSTEM
+ try:
+ rv = self.gather_system_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, value in rv:
+ if name not in SYS_METRICS:
+ continue
+ data[SYS_METRICS[name]] = int(float(value) * 1000)
+
+ # ACTIVITIES COUNT
+ try:
+ rv = self.gather_activities_count()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, amount in rv:
+ cleaned = name.replace(' ', '_').replace('(', '').replace(')', '')
+ new_name = 'activity_{0}'.format(cleaned)
+ data[new_name] = int(float(amount) * 1000)
+
+ # WAIT TIME
+ try:
+ rv = self.gather_wait_time_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, amount in rv:
+ cleaned = name.replace(' ', '_').replace('/', '').lower()
+ new_name = 'wait_time_{0}'.format(cleaned)
+ data[new_name] = amount
+
+ # TABLESPACE
+ try:
+ rv = self.gather_tablespace_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, offline, size, used, used_in_percent in rv:
+ # TODO: skip offline?
+                if offline or not self.charts:
+ continue
+ # TODO: remove inactive?
+ if name not in self.active_tablespaces:
+ self.active_tablespaces.add(name)
+ self.add_tablespace_to_charts(name)
+ data['{0}_tablespace_size'.format(name)] = int(size * 1000)
+ data['{0}_tablespace_used'.format(name)] = int(used * 1000)
+ data['{0}_tablespace_used_in_percent'.format(name)] = int(used_in_percent * 1000)
+
+ # ALLOCATED SPACE
+ try:
+ rv = self.gather_allocated_metrics()
+ except cx_Oracle.Error as error:
+ self.error(error)
+ self.alive = False
+ return None
+ else:
+ for name, offline, size, used, used_in_percent in rv:
+ # TODO: skip offline?
+                if offline or not self.charts:
+ continue
+ # TODO: remove inactive?
+ if name not in self.active_tablespaces:
+ self.active_tablespaces.add(name)
+ self.add_tablespace_to_charts(name)
+ data['{0}_allocated_size'.format(name)] = int(size * 1000)
+ data['{0}_allocated_used'.format(name)] = int(used * 1000)
+ data['{0}_allocated_used_in_percent'.format(name)] = int(used_in_percent * 1000)
+
+ return data or None
+
+ def gather_system_metrics(self):
+
+ """
+ :return:
+
+ [['Buffer Cache Hit Ratio', 100],
+ ['Memory Sorts Ratio', 100],
+ ['Redo Allocation Hit Ratio', 100],
+ ['User Transaction Per Sec', 0],
+ ['Physical Reads Per Sec', 0],
+ ['Physical Reads Per Txn', 0],
+ ['Physical Writes Per Sec', 0],
+ ['Physical Writes Per Txn', 0],
+ ['Physical Reads Direct Per Sec', 0],
+ ['Physical Reads Direct Per Txn', 0],
+ ['Physical Writes Direct Per Sec', 0],
+ ['Physical Writes Direct Per Txn', 0],
+ ['Physical Reads Direct Lobs Per Sec', 0],
+ ['Physical Reads Direct Lobs Per Txn', 0],
+ ['Physical Writes Direct Lobs Per Sec', 0],
+ ['Physical Writes Direct Lobs Per Txn', 0],
+ ['Redo Generated Per Sec', Decimal('4.66666666666667')],
+ ['Redo Generated Per Txn', 280],
+ ['Logons Per Sec', Decimal('0.0166666666666667')],
+ ['Logons Per Txn', 1],
+ ['Open Cursors Per Sec', 0.35],
+ ['Open Cursors Per Txn', 21],
+ ['User Commits Per Sec', 0],
+ ['User Commits Percentage', 0],
+ ['User Rollbacks Per Sec', 0],
+ ['User Rollbacks Percentage', 0],
+ ['User Calls Per Sec', Decimal('0.0333333333333333')],
+ ['User Calls Per Txn', 2],
+ ['Recursive Calls Per Sec', 14.15],
+ ['Recursive Calls Per Txn', 849],
+ ['Logical Reads Per Sec', Decimal('0.683333333333333')],
+ ['Logical Reads Per Txn', 41],
+ ['DBWR Checkpoints Per Sec', 0],
+ ['Background Checkpoints Per Sec', 0],
+ ['Redo Writes Per Sec', Decimal('0.0333333333333333')],
+ ['Redo Writes Per Txn', 2],
+ ['Long Table Scans Per Sec', 0],
+ ['Long Table Scans Per Txn', 0],
+ ['Total Table Scans Per Sec', Decimal('0.0166666666666667')],
+ ['Total Table Scans Per Txn', 1],
+ ['Full Index Scans Per Sec', 0],
+ ['Full Index Scans Per Txn', 0],
+ ['Total Index Scans Per Sec', Decimal('0.216666666666667')],
+ ['Total Index Scans Per Txn', 13],
+ ['Total Parse Count Per Sec', 0.35],
+ ['Total Parse Count Per Txn', 21],
+ ['Hard Parse Count Per Sec', 0],
+ ['Hard Parse Count Per Txn', 0],
+ ['Parse Failure Count Per Sec', 0],
+ ['Parse Failure Count Per Txn', 0],
+ ['Cursor Cache Hit Ratio', Decimal('52.3809523809524')],
+ ['Disk Sort Per Sec', 0],
+ ['Disk Sort Per Txn', 0],
+ ['Rows Per Sort', 8.6],
+ ['Execute Without Parse Ratio', Decimal('27.5862068965517')],
+ ['Soft Parse Ratio', 100],
+ ['User Calls Ratio', Decimal('0.235017626321974')],
+ ['Host CPU Utilization (%)', Decimal('0.124311845142959')],
+ ['Network Traffic Volume Per Sec', 0],
+ ['Enqueue Timeouts Per Sec', 0],
+ ['Enqueue Timeouts Per Txn', 0],
+ ['Enqueue Waits Per Sec', 0],
+ ['Enqueue Waits Per Txn', 0],
+ ['Enqueue Deadlocks Per Sec', 0],
+ ['Enqueue Deadlocks Per Txn', 0],
+ ['Enqueue Requests Per Sec', Decimal('216.683333333333')],
+ ['Enqueue Requests Per Txn', 13001],
+ ['DB Block Gets Per Sec', 0],
+ ['DB Block Gets Per Txn', 0],
+ ['Consistent Read Gets Per Sec', Decimal('0.683333333333333')],
+ ['Consistent Read Gets Per Txn', 41],
+ ['DB Block Changes Per Sec', 0],
+ ['DB Block Changes Per Txn', 0],
+ ['Consistent Read Changes Per Sec', 0],
+ ['Consistent Read Changes Per Txn', 0],
+ ['CPU Usage Per Sec', 0],
+ ['CPU Usage Per Txn', 0],
+ ['CR Blocks Created Per Sec', 0],
+ ['CR Blocks Created Per Txn', 0],
+ ['CR Undo Records Applied Per Sec', 0],
+ ['CR Undo Records Applied Per Txn', 0],
+ ['User Rollback UndoRec Applied Per Sec', 0],
+ ['User Rollback Undo Records Applied Per Txn', 0],
+ ['Leaf Node Splits Per Sec', 0],
+ ['Leaf Node Splits Per Txn', 0],
+ ['Branch Node Splits Per Sec', 0],
+ ['Branch Node Splits Per Txn', 0],
+ ['PX downgraded 1 to 25% Per Sec', 0],
+ ['PX downgraded 25 to 50% Per Sec', 0],
+ ['PX downgraded 50 to 75% Per Sec', 0],
+ ['PX downgraded 75 to 99% Per Sec', 0],
+ ['PX downgraded to serial Per Sec', 0],
+ ['Physical Read Total IO Requests Per Sec', Decimal('2.16666666666667')],
+ ['Physical Read Total Bytes Per Sec', Decimal('35498.6666666667')],
+ ['GC CR Block Received Per Second', 0],
+ ['GC CR Block Received Per Txn', 0],
+ ['GC Current Block Received Per Second', 0],
+ ['GC Current Block Received Per Txn', 0],
+ ['Global Cache Average CR Get Time', 0],
+ ['Global Cache Average Current Get Time', 0],
+ ['Physical Write Total IO Requests Per Sec', Decimal('0.966666666666667')],
+ ['Global Cache Blocks Corrupted', 0],
+ ['Global Cache Blocks Lost', 0],
+ ['Current Logons Count', 49],
+ ['Current Open Cursors Count', 64],
+ ['User Limit %', Decimal('0.00000114087015416959')],
+ ['SQL Service Response Time', 0],
+ ['Database Wait Time Ratio', 0],
+ ['Database CPU Time Ratio', 0],
+ ['Response Time Per Txn', 0],
+ ['Row Cache Hit Ratio', 100],
+ ['Row Cache Miss Ratio', 0],
+ ['Library Cache Hit Ratio', 100],
+ ['Library Cache Miss Ratio', 0],
+ ['Shared Pool Free %', Decimal('7.82380268491548')],
+ ['PGA Cache Hit %', Decimal('98.0399767109115')],
+ ['Process Limit %', Decimal('17.6666666666667')],
+ ['Session Limit %', Decimal('15.2542372881356')],
+ ['Executions Per Txn', 29],
+ ['Executions Per Sec', Decimal('0.483333333333333')],
+ ['Txns Per Logon', 0],
+ ['Database Time Per Sec', 0],
+ ['Physical Write Total Bytes Per Sec', 15308.8],
+ ['Physical Read IO Requests Per Sec', 0],
+ ['Physical Read Bytes Per Sec', 0],
+ ['Physical Write IO Requests Per Sec', 0],
+ ['Physical Write Bytes Per Sec', 0],
+ ['DB Block Changes Per User Call', 0],
+ ['DB Block Gets Per User Call', 0],
+ ['Executions Per User Call', 14.5],
+ ['Logical Reads Per User Call', 20.5],
+ ['Total Sorts Per User Call', 2.5],
+ ['Total Table Scans Per User Call', 0.5],
+ ['Current OS Load', 0.0390625],
+ ['Streams Pool Usage Percentage', 0],
+ ['PQ QC Session Count', 0],
+ ['PQ Slave Session Count', 0],
+ ['Queries parallelized Per Sec', 0],
+ ['DML statements parallelized Per Sec', 0],
+ ['DDL statements parallelized Per Sec', 0],
+ ['PX operations not downgraded Per Sec', 0],
+ ['Session Count', 72],
+ ['Average Synchronous Single-Block Read Latency', 0],
+ ['I/O Megabytes per Second', 0.05],
+ ['I/O Requests per Second', Decimal('3.13333333333333')],
+ ['Average Active Sessions', 0],
+ ['Active Serial Sessions', 1],
+ ['Active Parallel Sessions', 0],
+ ['Captured user calls', 0],
+ ['Replayed user calls', 0],
+ ['Workload Capture and Replay status', 0],
+ ['Background CPU Usage Per Sec', Decimal('1.22578833333333')],
+ ['Background Time Per Sec', 0.0147551],
+ ['Host CPU Usage Per Sec', Decimal('0.116666666666667')],
+ ['Cell Physical IO Interconnect Bytes', 3048448],
+ ['Temp Space Used', 0],
+ ['Total PGA Allocated', 200657920],
+ ['Total PGA Used by SQL Workareas', 0],
+ ['Run Queue Per Sec', 0],
+ ['VM in bytes Per Sec', 0],
+ ['VM out bytes Per Sec', 0]]
+ """
+
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_SYSTEM)
+ for metric_name, value in cursor.fetchall():
+ metrics.append([metric_name, value])
+ return metrics
+
+ def gather_tablespace_metrics(self):
+ """
+ :return:
+
+ [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
+ ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
+ ['TEMP', 0.0, 3233177600.0, 0.0, 0],
+ ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
+ """
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_TABLESPACE)
+ for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
+ if used_bytes is None:
+ offline = True
+ used = 0
+ else:
+ offline = False
+ used = float(used_bytes)
+ if max_bytes is None:
+ size = 0
+ else:
+ size = float(max_bytes)
+ if used_percent is None:
+ used_percent = 0
+ else:
+ used_percent = float(used_percent)
+ metrics.append(
+ [
+ tablespace_name,
+ offline,
+ size,
+ used,
+ used_percent,
+ ]
+ )
+ return metrics
+
+ def gather_allocated_metrics(self):
+ """
+ :return:
+
+ [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
+ ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
+ ['TEMP', 0.0, 3233177600.0, 0.0, 0],
+ ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
+ """
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_ALLOCATED)
+ for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
+ if used_bytes is None:
+ offline = True
+ used = 0
+ else:
+ offline = False
+ used = float(used_bytes)
+ if max_bytes is None:
+ size = 0
+ else:
+ size = float(max_bytes)
+ if used_percent is None:
+ used_percent = 0
+ else:
+ used_percent = float(used_percent)
+ metrics.append(
+ [
+ tablespace_name,
+ offline,
+ size,
+ used,
+ used_percent,
+ ]
+ )
+ return metrics
+
+ def gather_wait_time_metrics(self):
+ """
+ :return:
+
+ [['Other', 0],
+ ['Application', 0],
+ ['Configuration', 0],
+ ['Administrative', 0],
+ ['Concurrency', 0],
+ ['Commit', 0],
+ ['Network', 0],
+ ['User I/O', 0],
+ ['System I/O', 0.002],
+ ['Scheduler', 0]]
+ """
+ metrics = list()
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_WAIT_TIME)
+ for wait_class_name, value in cursor.fetchall():
+ metrics.append([wait_class_name, value])
+ return metrics
+
+ def gather_activities_count(self):
+ """
+ :return:
+
+ [('user commits', 9104),
+ ('user rollbacks', 17),
+ ('parse count (total)', 483695),
+ ('execute count', 2020356)]
+ """
+ with self.conn.cursor() as cursor:
+ cursor.execute(QUERY_ACTIVITIES_COUNT)
+ return cursor.fetchall()
+
+ # def gather_process_metrics(self):
+ # """
+ # :return:
+ #
+ # [['PSEUDO', 'pga_used_memory', 0],
+ # ['PSEUDO', 'pga_allocated_memory', 0],
+ # ['PSEUDO', 'pga_freeable_memory', 0],
+ # ['PSEUDO', 'pga_maximum_memory', 0],
+ # ['oracle@localhost.localdomain (PMON)', 'pga_used_memory', 1793827],
+ # ['oracle@localhost.localdomain (PMON)', 'pga_allocated_memory', 1888651],
+ # ['oracle@localhost.localdomain (PMON)', 'pga_freeable_memory', 0],
+ # ['oracle@localhost.localdomain (PMON)', 'pga_maximum_memory', 1888651],
+ # ...
+ # ...
+ # """
+ #
+ # metrics = list()
+ # with self.conn.cursor() as cursor:
+ # cursor.execute(QUERY_PROCESS)
+ # for row in cursor.fetchall():
+ # for i, name in enumerate(PROCESS_METRICS, 1):
+ # metrics.append([row[0], name, row[i]])
+ # return metrics
+
+ # def gather_processes_count(self):
+ # with self.conn.cursor() as cursor:
+ # cursor.execute(QUERY_PROCESSES_COUNT)
+ # return cursor.fetchone()[0] # 53
+
+ # def gather_sessions_count(self):
+ # with self.conn.cursor() as cursor:
+ # cursor.execute(QUERY_SESSION_COUNT)
+ # total, active, inactive = 0, 0, 0
+ # for status, _ in cursor.fetchall():
+ # total += 1
+ # active += status == 'ACTIVE'
+ # inactive += status == 'INACTIVE'
+ # return [total, active, inactive]
+
+ def add_tablespace_to_charts(self, name):
+ self.charts['tablespace_size'].add_dimension(
+ [
+ '{0}_tablespace_size'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1024 * 1000,
+ ])
+ self.charts['tablespace_usage'].add_dimension(
+ [
+ '{0}_tablespace_used'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1024 * 1000,
+ ])
+ self.charts['tablespace_usage_in_percent'].add_dimension(
+ [
+ '{0}_tablespace_used_in_percent'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_size'].add_dimension(
+ [
+ '{0}_allocated_size'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_usage'].add_dimension(
+ [
+ '{0}_allocated_used'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
+ self.charts['allocated_usage_in_percent'].add_dimension(
+ [
+ '{0}_allocated_used_in_percent'.format(name),
+ name,
+ 'absolute',
+ 1,
+ 1000,
+ ])
diff --git a/collectors/python.d.plugin/oracledb/oracledb.conf b/collectors/python.d.plugin/oracledb/oracledb.conf
new file mode 100644
index 00000000..027215da
--- /dev/null
+++ b/collectors/python.d.plugin/oracledb/oracledb.conf
@@ -0,0 +1,88 @@
+# netdata python.d.plugin configuration for oracledb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, oracledb also supports the following:
+#
+# user: username # the username for the user account. Required.
+# password: password # the password for the user account. Required.
+# server: localhost:1521 # the IP address or hostname (and port) of the Oracle Database Server. Required.
+# service: XE # the Oracle Database service name. Required. To view the services available on your server,
+# run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`.
+# protocol: tcp/tcps # one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic
+# or encrypted network traffic
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+#local:
+# user: 'netdata'
+# password: 'secret'
+# server: 'localhost:1521'
+# service: 'XE'
+# protocol: 'tcps'
+
+#remote:
+# user: 'netdata'
+# password: 'secret'
+# server: '10.0.0.1:1521'
+# service: 'XE'
+# protocol: 'tcps'
diff --git a/collectors/python.d.plugin/pandas/Makefile.inc b/collectors/python.d.plugin/pandas/Makefile.inc
new file mode 100644
index 00000000..9f4f9b34
--- /dev/null
+++ b/collectors/python.d.plugin/pandas/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += pandas/pandas.chart.py
+dist_pythonconfig_DATA += pandas/pandas.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += pandas/README.md pandas/Makefile.inc
+
diff --git a/collectors/python.d.plugin/pandas/README.md b/collectors/python.d.plugin/pandas/README.md
new file mode 120000
index 00000000..2fabe63c
--- /dev/null
+++ b/collectors/python.d.plugin/pandas/README.md
@@ -0,0 +1 @@
+integrations/pandas.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/pandas/integrations/pandas.md b/collectors/python.d.plugin/pandas/integrations/pandas.md
new file mode 100644
index 00000000..83c5c66b
--- /dev/null
+++ b/collectors/python.d.plugin/pandas/integrations/pandas.md
@@ -0,0 +1,365 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/pandas/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/pandas/metadata.yaml"
+sidebar_label: "Pandas"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Generic Data Collection"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Pandas
+
+
+<img src="https://netdata.cloud/img/pandas.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: pandas
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.
+If you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),
+either locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.
+
+This collector can be used to collect pretty much anything that can be read and then processed by Pandas.
+
+
+The collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding it to Netdata.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+This collector expects one row in the final pandas DataFrame. That first row is taken as
+the most recent value for each dimension on each chart, using `df.to_dict(orient='records')[0]`.
+See [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).
+
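+For illustration, a minimal sketch of that mapping (the DataFrame below is hypothetical, standing in for the output of your last `df_steps` operation):
+
+```python
+import pandas as pd
+
+# pretend this one-row frame is what the final df_steps operation returned
+df = pd.DataFrame({'dublin': [11.2], 'athens': [24.9]})
+
+latest = df.to_dict(orient='records')[0]  # {'dublin': 11.2, 'athens': 24.9}
+# each key becomes a chart dimension and each value is its most recent reading
+print(latest)
+```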
+
+### Per Pandas instance
+
+These metrics refer to the entire monitored application.
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Python Requirements
+
+This collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.
+
+```bash
+sudo pip install pandas requests
+```
+
+Note: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.
+
+```bash
+sudo pip install 'sqlalchemy<2.0' psycopg2-binary
+```
+
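+To confirm the packages are importable by the interpreter that `python.d.plugin` uses (typically the system `python3`), a quick check like the one below can help; the exact interpreter may differ on your install:
+
+```bash
+sudo -u netdata python3 -c 'import pandas, requests; print(pandas.__version__)'
+```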
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/pandas.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/pandas.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| chart_configs | an array of chart configuration dictionaries | [] | yes |
+| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+
+##### Temperature API Example
+
+Example pulling some hourly temperature data, with one chart for today's forecast (mean, min, max) and another chart for the current temperature.
+
+<details><summary>Config</summary>
+
+```yaml
+temperature:
+ name: "temperature"
+ update_every: 5
+ chart_configs:
+ - name: "temperature_forecast_by_city"
+ title: "Temperature By City - Today Forecast"
+ family: "temperature.today"
+ context: "pandas.temperature"
+ type: "line"
+ units: "Celsius"
+ df_steps: >
+ pd.DataFrame.from_dict(
+ {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']
+ for (city,lat,lng)
+ in [
+ ('dublin', 53.3441, -6.2675),
+ ('athens', 37.9792, 23.7166),
+ ('london', 51.5002, -0.1262),
+ ('berlin', 52.5235, 13.4115),
+ ('paris', 48.8567, 2.3510),
+ ('madrid', 40.4167, -3.7033),
+ ('new_york', 40.71, -74.01),
+ ('los_angeles', 34.05, -118.24),
+ ]
+ }
+ );
+ df.describe(); # get aggregate stats for each city;
+ df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;
+ df.rename(columns={'index':'city'}); # some column renaming;
+ df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;
+ df.rename(columns={0:'degrees'}); # some column renaming;
+ pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;
+ df.rename(columns={0:'measurement'}); # some column renaming;
+ df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;
+ df.sort_index(); # sort by city name;
+ df.transpose(); # transpose so its just one wide row;
+ - name: "temperature_current_by_city"
+ title: "Temperature By City - Current"
+ family: "temperature.current"
+ context: "pandas.temperature"
+ type: "line"
+ units: "Celsius"
+ df_steps: >
+ pd.DataFrame.from_dict(
+ {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']
+ for (city,lat,lng)
+ in [
+ ('dublin', 53.3441, -6.2675),
+ ('athens', 37.9792, 23.7166),
+ ('london', 51.5002, -0.1262),
+ ('berlin', 52.5235, 13.4115),
+ ('paris', 48.8567, 2.3510),
+ ('madrid', 40.4167, -3.7033),
+ ('new_york', 40.71, -74.01),
+ ('los_angeles', 34.05, -118.24),
+ ]
+ }
+ );
+ df.transpose();
+ df[['temperature']];
+ df.transpose();
+
+```
+</details>
+
+##### API CSV Example
+
+Example showing a `read_csv` from a URL and some light pandas data wrangling.
+
+<details><summary>Config</summary>
+
+```yaml
+example_csv:
+ name: "example_csv"
+ update_every: 2
+ chart_configs:
+ - name: "london_system_cpu"
+ title: "London System CPU - Ratios"
+ family: "london_system_cpu"
+ context: "pandas"
+ type: "line"
+ units: "n"
+ df_steps: >
+ pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});
+ df.drop('time', axis=1);
+ df.mean().to_frame().transpose();
+ df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();
+ df.rename(columns={0:'average_user_system_ratio'});
+ df*100;
+
+```
+</details>
+
+##### API JSON Example
+
+Example showing a `read_json` from a URL and some light pandas data wrangling.
+
+<details><summary>Config</summary>
+
+```yaml
+example_json:
+ name: "example_json"
+ update_every: 2
+ chart_configs:
+ - name: "london_system_net"
+ title: "London System Net - Total Bandwidth"
+ family: "london_system_net"
+ context: "pandas"
+ type: "area"
+ units: "kilobits/s"
+ df_steps: >
+ pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);
+ df.drop('time', axis=1);
+ abs(df);
+ df.sum(axis=1).to_frame();
+ df.rename(columns={0:'total_bandwidth'});
+
+```
+</details>
+
+##### XML Example
+
+Example showing a `read_xml` from a URL and some light pandas data wrangling.
+
+<details><summary>Config</summary>
+
+```yaml
+example_xml:
+ name: "example_xml"
+ update_every: 2
+ line_sep: "|"
+ chart_configs:
+ - name: "temperature_forcast"
+ title: "Temperature Forecast"
+ family: "temp"
+ context: "pandas.temp"
+ type: "line"
+ units: "celsius"
+ df_steps: >
+ pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|
+ df.rename(columns={'value': 'dublin'})|
+ df[['dublin']]|
+
+```
+</details>
+
+##### SQL Example
+
+Example showing a `read_sql` from a PostgreSQL database using SQLAlchemy.
+
+<details><summary>Config</summary>
+
+```yaml
+sql:
+ name: "sql"
+ update_every: 5
+ chart_configs:
+ - name: "sql"
+ title: "SQL Example"
+ family: "sql.example"
+ context: "example"
+ type: "line"
+ units: "percent"
+ df_steps: >
+ pd.read_sql_query(
+ sql='\
+ select \
+ random()*100 as metric_1, \
+ random()*100 as metric_2 \
+ ',
+ con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')
+ );
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin pandas debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/pandas/metadata.yaml b/collectors/python.d.plugin/pandas/metadata.yaml
new file mode 100644
index 00000000..92ee1e98
--- /dev/null
+++ b/collectors/python.d.plugin/pandas/metadata.yaml
@@ -0,0 +1,308 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: pandas
+ monitored_instance:
+ name: Pandas
+ link: https://pandas.pydata.org/
+ categories:
+ - data-collection.generic-data-collection
+ icon_filename: pandas.png
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - pandas
+ - python
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ [Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.
+ If you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),
+ either locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.
+
+          This collector can be used to collect pretty much anything that can be read and then processed by Pandas.
+ method_description: |
+          The collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding it to Netdata.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Python Requirements
+ description: |
+ This collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.
+
+ ```bash
+ sudo pip install pandas requests
+ ```
+
+ Note: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.
+
+ ```bash
+ sudo pip install 'sqlalchemy<2.0' psycopg2-binary
+ ```
+ configuration:
+ file:
+ name: python.d/pandas.conf
+ description: ""
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: chart_configs
+ description: an array of chart configuration dictionaries
+ default_value: "[]"
+ required: true
+ - name: chart_configs.name
+ description: name of the chart to be displayed in the dashboard.
+ default_value: None
+ required: true
+ - name: chart_configs.title
+ description: title of the chart to be displayed in the dashboard.
+ default_value: None
+ required: true
+ - name: chart_configs.family
+ description: "[family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard."
+ default_value: None
+ required: true
+ - name: chart_configs.context
+ description: "[context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard."
+ default_value: None
+ required: true
+ - name: chart_configs.type
+ description: the type of the chart to be displayed in the dashboard.
+ default_value: None
+ required: true
+ - name: chart_configs.units
+ description: the units of the chart to be displayed in the dashboard.
+ default_value: None
+ required: true
+ - name: chart_configs.df_steps
+ description: a series of pandas operations (one per line) that each returns a dataframe.
+ default_value: None
+ required: true
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: Config
+ list:
+ - name: Temperature API Example
+ folding:
+ enabled: true
+              description: Example pulling some hourly temperature data, with one chart for today's forecast (mean, min, max) and another chart for the current temperature.
+ config: |
+ temperature:
+ name: "temperature"
+ update_every: 5
+ chart_configs:
+ - name: "temperature_forecast_by_city"
+ title: "Temperature By City - Today Forecast"
+ family: "temperature.today"
+ context: "pandas.temperature"
+ type: "line"
+ units: "Celsius"
+ df_steps: >
+ pd.DataFrame.from_dict(
+ {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']
+ for (city,lat,lng)
+ in [
+ ('dublin', 53.3441, -6.2675),
+ ('athens', 37.9792, 23.7166),
+ ('london', 51.5002, -0.1262),
+ ('berlin', 52.5235, 13.4115),
+ ('paris', 48.8567, 2.3510),
+ ('madrid', 40.4167, -3.7033),
+ ('new_york', 40.71, -74.01),
+ ('los_angeles', 34.05, -118.24),
+ ]
+ }
+ );
+ df.describe(); # get aggregate stats for each city;
+ df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;
+ df.rename(columns={'index':'city'}); # some column renaming;
+ df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;
+ df.rename(columns={0:'degrees'}); # some column renaming;
+ pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;
+ df.rename(columns={0:'measurement'}); # some column renaming;
+ df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;
+ df.sort_index(); # sort by city name;
+ df.transpose(); # transpose so its just one wide row;
+ - name: "temperature_current_by_city"
+ title: "Temperature By City - Current"
+ family: "temperature.current"
+ context: "pandas.temperature"
+ type: "line"
+ units: "Celsius"
+ df_steps: >
+ pd.DataFrame.from_dict(
+ {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']
+ for (city,lat,lng)
+ in [
+ ('dublin', 53.3441, -6.2675),
+ ('athens', 37.9792, 23.7166),
+ ('london', 51.5002, -0.1262),
+ ('berlin', 52.5235, 13.4115),
+ ('paris', 48.8567, 2.3510),
+ ('madrid', 40.4167, -3.7033),
+ ('new_york', 40.71, -74.01),
+ ('los_angeles', 34.05, -118.24),
+ ]
+ }
+ );
+ df.transpose();
+ df[['temperature']];
+ df.transpose();
+ - name: API CSV Example
+ folding:
+ enabled: true
+              description: Example showing a `read_csv` from a URL and some light pandas data wrangling.
+ config: |
+ example_csv:
+ name: "example_csv"
+ update_every: 2
+ chart_configs:
+ - name: "london_system_cpu"
+ title: "London System CPU - Ratios"
+ family: "london_system_cpu"
+ context: "pandas"
+ type: "line"
+ units: "n"
+ df_steps: >
+ pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});
+ df.drop('time', axis=1);
+ df.mean().to_frame().transpose();
+ df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();
+ df.rename(columns={0:'average_user_system_ratio'});
+ df*100;
+ - name: API JSON Example
+ folding:
+ enabled: true
+ description: example showing a read_json from a url and some light pandas data wrangling.
+ config: |
+ example_json:
+ name: "example_json"
+ update_every: 2
+ chart_configs:
+ - name: "london_system_net"
+ title: "London System Net - Total Bandwidth"
+ family: "london_system_net"
+ context: "pandas"
+ type: "area"
+ units: "kilobits/s"
+ df_steps: >
+ pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);
+ df.drop('time', axis=1);
+ abs(df);
+ df.sum(axis=1).to_frame();
+ df.rename(columns={0:'total_bandwidth'});
+ - name: XML Example
+ folding:
+ enabled: true
+ description: example showing a read_xml from a url and some light pandas data wrangling.
+ config: |
+ example_xml:
+ name: "example_xml"
+ update_every: 2
+ line_sep: "|"
+ chart_configs:
+ - name: "temperature_forcast"
+ title: "Temperature Forecast"
+ family: "temp"
+ context: "pandas.temp"
+ type: "line"
+ units: "celsius"
+ df_steps: >
+ pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|
+ df.rename(columns={'value': 'dublin'})|
+ df[['dublin']]|
+ - name: SQL Example
+ folding:
+ enabled: true
+ description: example showing a read_sql from a postgres database using sqlalchemy.
+ config: |
+ sql:
+ name: "sql"
+ update_every: 5
+ chart_configs:
+ - name: "sql"
+ title: "SQL Example"
+ family: "sql.example"
+ context: "example"
+ type: "line"
+ units: "percent"
+ df_steps: >
+ pd.read_sql_query(
+ sql='\
+ select \
+ random()*100 as metric_1, \
+ random()*100 as metric_2 \
+ ',
+ con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')
+ );
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: |
+      This collector expects one row in the final pandas DataFrame. It is that first row that will be taken
+      as the most recent values for each dimension on each chart, using `df.to_dict(orient='records')[0]`.
+      See [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).
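+
+      As a minimal sketch (with hypothetical column names, not part of the collector), the mapping works like this:
+
+      ```python
+      import pandas as pd
+
+      df = pd.DataFrame({'dublin': [12.1], 'athens': [24.3]})  # hypothetical final df: one wide row, one column per dimension
+      data = df.to_dict(orient='records')[0]                   # {'dublin': 12.1, 'athens': 24.3}
+      ```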
+ availability: []
+ scopes:
+ - name: global
+ description: |
+ These metrics refer to the entire monitored application.
+ labels: []
+ metrics: []
diff --git a/collectors/python.d.plugin/pandas/pandas.chart.py b/collectors/python.d.plugin/pandas/pandas.chart.py
new file mode 100644
index 00000000..7977bcb3
--- /dev/null
+++ b/collectors/python.d.plugin/pandas/pandas.chart.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+# Description: pandas netdata python.d module
+# Author: Andrew Maguire (andrewm4894)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import pandas as pd
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from sqlalchemy import create_engine
+ HAS_SQLALCHEMY = True
+except ImportError:
+ HAS_SQLALCHEMY = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = []
+
+CHARTS = {}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.chart_configs = self.configuration.get('chart_configs', None)
+ self.line_sep = self.configuration.get('line_sep', ';')
+
+ def run_code(self, df_steps):
+ """eval() each line of code and ensure the result is a pandas dataframe"""
+
+ # process each line of code
+ lines = df_steps.split(self.line_sep)
+ for line in lines:
+ line_clean = line.strip('\n').strip(' ')
+ if line_clean != '' and line_clean[0] != '#':
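+                # each evaluated step can reference the `df` returned by the previous step,
+                # plus the module level imports (pd, and requests/create_engine when available)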
+ df = eval(line_clean)
+ assert isinstance(df, pd.DataFrame), 'The result of each evaluated line of `df_steps` must be of type `pd.DataFrame`'
+
+ # take top row of final df as data to be collected by netdata
+ data = df.to_dict(orient='records')[0]
+
+ return data
+
+ def check(self):
+ """ensure charts and dims all configured and that we can get data"""
+
+ if not HAS_REQUESTS:
+ self.warning('requests library could not be imported')
+
+ if not HAS_SQLALCHEMY:
+ self.warning('sqlalchemy library could not be imported')
+
+ if not self.chart_configs:
+ self.error('chart_configs must be defined')
+
+ data = dict()
+
+ # add each chart as defined by the config
+ for chart_config in self.chart_configs:
+ if chart_config['name'] not in self.charts:
+ chart_template = {
+ 'options': [
+ chart_config['name'],
+ chart_config['title'],
+ chart_config['units'],
+ chart_config['family'],
+ chart_config['context'],
+ chart_config['type']
+ ],
+ 'lines': []
+ }
+ self.charts.add_chart([chart_config['name']] + chart_template['options'])
+
+ data_tmp = self.run_code(chart_config['df_steps'])
+ data.update(data_tmp)
+
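+            # add a dimension for every column returned by this chart's df_steps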
+ for dim in data_tmp:
+ self.charts[chart_config['name']].add_dimension([dim, dim, 'absolute', 1, 1])
+
+ return True
+
+ def get_data(self):
+ """get data for each chart config"""
+
+ data = dict()
+
+ for chart_config in self.chart_configs:
+ data_tmp = self.run_code(chart_config['df_steps'])
+ data.update(data_tmp)
+
+ return data
diff --git a/collectors/python.d.plugin/pandas/pandas.conf b/collectors/python.d.plugin/pandas/pandas.conf
new file mode 100644
index 00000000..74a7da3e
--- /dev/null
+++ b/collectors/python.d.plugin/pandas/pandas.conf
@@ -0,0 +1,211 @@
+# netdata python.d.plugin configuration for pandas
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, pandas also supports the following:
+#
+#  chart_configs: [<dictionary>] # an array of chart config dictionaries.
+#  line_sep: ';'                 # the separator used to split df_steps into individual pandas operations (optional, default ';').
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+# Some example configurations follow. To enable this collector, uncomment one of the examples below and restart netdata.
+
+# example pulling some hourly temperature data, with a chart for today's forecast (mean,min,max) and another chart for the current temperature.
+# temperature:
+# name: "temperature"
+# update_every: 5
+# chart_configs:
+# - name: "temperature_forecast_by_city"
+# title: "Temperature By City - Today Forecast"
+# family: "temperature.today"
+# context: "pandas.temperature"
+# type: "line"
+# units: "Celsius"
+# df_steps: >
+# pd.DataFrame.from_dict(
+# {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']
+# for (city,lat,lng)
+# in [
+# ('dublin', 53.3441, -6.2675),
+# ('athens', 37.9792, 23.7166),
+# ('london', 51.5002, -0.1262),
+# ('berlin', 52.5235, 13.4115),
+# ('paris', 48.8567, 2.3510),
+# ('madrid', 40.4167, -3.7033),
+# ('new_york', 40.71, -74.01),
+# ('los_angeles', 34.05, -118.24),
+# ]
+# }
+# );
+# df.describe(); # get aggregate stats for each city;
+# df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;
+# df.rename(columns={'index':'city'}); # some column renaming;
+# df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;
+# df.rename(columns={0:'degrees'}); # some column renaming;
+# pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;
+# df.rename(columns={0:'measurement'}); # some column renaming;
+# df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;
+# df.sort_index(); # sort by city name;
+# df.transpose(); # transpose so its just one wide row;
+# - name: "temperature_current_by_city"
+# title: "Temperature By City - Current"
+# family: "temperature.current"
+# context: "pandas.temperature"
+# type: "line"
+# units: "Celsius"
+# df_steps: >
+# pd.DataFrame.from_dict(
+# {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']
+# for (city,lat,lng)
+# in [
+# ('dublin', 53.3441, -6.2675),
+# ('athens', 37.9792, 23.7166),
+# ('london', 51.5002, -0.1262),
+# ('berlin', 52.5235, 13.4115),
+# ('paris', 48.8567, 2.3510),
+# ('madrid', 40.4167, -3.7033),
+# ('new_york', 40.71, -74.01),
+# ('los_angeles', 34.05, -118.24),
+# ]
+# }
+# );
+# df.transpose();
+# df[['temperature']];
+# df.transpose();
+
+# example showing a read_csv from a url and some light pandas data wrangling.
+# pull data in csv format from the london demo server and then compute the ratio of user cpu over system cpu, averaged over the last 60 seconds.
+# example_csv:
+# name: "example_csv"
+# update_every: 2
+# chart_configs:
+# - name: "london_system_cpu"
+# title: "London System CPU - Ratios"
+# family: "london_system_cpu"
+# context: "pandas"
+# type: "line"
+# units: "n"
+# df_steps: >
+# pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});
+# df.drop('time', axis=1);
+# df.mean().to_frame().transpose();
+# df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();
+# df.rename(columns={0:'average_user_system_ratio'});
+# df*100;
+
+# example showing a read_json from a url and some light pandas data wrangling.
+# pull data in json format from the london demo server (using requests.get(), since this json data is too complex for pd.read_json()) and work out 'total_bandwidth'.
+# example_json:
+# name: "example_json"
+# update_every: 2
+# chart_configs:
+# - name: "london_system_net"
+# title: "London System Net - Total Bandwidth"
+# family: "london_system_net"
+# context: "pandas"
+# type: "area"
+# units: "kilobits/s"
+# df_steps: >
+# pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);
+# df.drop('time', axis=1);
+# abs(df);
+# df.sum(axis=1).to_frame();
+# df.rename(columns={0:'total_bandwidth'});
+
+# example showing a read_xml from a url and some light pandas data wrangling.
+# pull weather forecast data in xml format, use xpath to pull out temperature forecast.
+# example_xml:
+# name: "example_xml"
+# update_every: 2
+# line_sep: "|"
+# chart_configs:
+# - name: "temperature_forcast"
+# title: "Temperature Forecast"
+# family: "temp"
+# context: "pandas.temp"
+# type: "line"
+# units: "celsius"
+# df_steps: >
+# pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|
+# df.rename(columns={'value': 'dublin'})|
+# df[['dublin']]|
+
+# example showing a read_sql from a postgres database using sqlalchemy.
+# note: this example assumes a postgres db running on localhost with a user netdata and password netdata.
+# sql:
+# name: "sql"
+# update_every: 5
+# chart_configs:
+# - name: "sql"
+# title: "SQL Example"
+# family: "sql.example"
+# context: "example"
+# type: "line"
+# units: "percent"
+# df_steps: >
+# pd.read_sql_query(
+# sql='\
+# select \
+# random()*100 as metric_1, \
+# random()*100 as metric_2 \
+# ',
+# con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')
+# );
diff --git a/collectors/python.d.plugin/postfix/Makefile.inc b/collectors/python.d.plugin/postfix/Makefile.inc
new file mode 100644
index 00000000..f4091b21
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += postfix/postfix.chart.py
+dist_pythonconfig_DATA += postfix/postfix.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
+
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
new file mode 120000
index 00000000..c62eb5c2
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -0,0 +1 @@
+integrations/postfix.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/postfix/integrations/postfix.md b/collectors/python.d.plugin/postfix/integrations/postfix.md
new file mode 100644
index 00000000..2bb99922
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/integrations/postfix.md
@@ -0,0 +1,151 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postfix/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postfix/metadata.yaml"
+sidebar_label: "Postfix"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Mail Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Postfix
+
+
+<img src="https://netdata.cloud/img/postfix.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: postfix
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Keep an eye on Postfix metrics for efficient mail server operations.
+Improve your mail server performance with Netdata's real-time metrics and built-in alerts.
+
+
+Monitors MTA email queue statistics using the [postqueue](http://www.postfix.org/postqueue.1.html) tool.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+Postfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to view the mail queue. In order to do it, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.
+See the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.
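+
+For example, the relevant line in `/etc/postfix/main.cf` could look like the following (the exact user list is an assumption; keep whatever users your setup already authorizes):
+
+```text
+# /etc/postfix/main.cf (illustrative; 'root' shown as an assumed pre-existing entry)
+authorized_mailq_users = root, netdata
+```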
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector executes `postqueue -p` to get Postfix queue statistics.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Postfix instance
+
+These metrics refer to the entire monitored application.
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postfix.qemails | emails | emails |
+| postfix.qsize | size | KiB |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+There are no configuration examples.
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `postfix` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin postfix debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/postfix/metadata.yaml b/collectors/python.d.plugin/postfix/metadata.yaml
new file mode 100644
index 00000000..1bbb6116
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/metadata.yaml
@@ -0,0 +1,124 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: postfix
+ monitored_instance:
+ name: Postfix
+ link: https://www.postfix.org/
+ categories:
+ - data-collection.mail-servers
+ icon_filename: "postfix.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - postfix
+ - mail
+ - mail server
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ Keep an eye on Postfix metrics for efficient mail server operations.
+
+ Improve your mail server performance with Netdata's real-time metrics and built-in alerts.
+ method_description: >
+        Monitors MTA email queue statistics using the [postqueue](http://www.postfix.org/postqueue.1.html) tool.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: >
+ Postfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view
+ the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to
+ view the mail queue. In order to do it, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.
+
+ See the `authorized_mailq_users` setting in
+ the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.
+ default_behavior:
+ auto_detection:
+ description: "The collector executes `postqueue -p` to get Postfix queue statistics."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: ""
+ description: ""
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 1
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: |
+ These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: postfix.qemails
+ description: Postfix Queue Emails
+ unit: "emails"
+ chart_type: line
+ dimensions:
+ - name: emails
+ - name: postfix.qsize
+ description: Postfix Queue Emails Size
+ unit: "KiB"
+ chart_type: area
+ dimensions:
+ - name: size
diff --git a/collectors/python.d.plugin/postfix/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
new file mode 100644
index 00000000..b650514e
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Description: postfix netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+
+POSTQUEUE_COMMAND = 'postqueue -p'
+
+ORDER = [
+ 'qemails',
+ 'qsize',
+]
+
+CHARTS = {
+ 'qemails': {
+ 'options': [None, 'Postfix Queue Emails', 'emails', 'queue', 'postfix.qemails', 'line'],
+ 'lines': [
+ ['emails', None, 'absolute']
+ ]
+ },
+ 'qsize': {
+ 'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'],
+ 'lines': [
+ ['size', None, 'absolute']
+ ]
+ }
+}
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.command = POSTQUEUE_COMMAND
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()[-1].split(' ')
+ if raw[0] == 'Mail' and raw[1] == 'queue':
+ return {'emails': 0,
+ 'size': 0}
+
+ return {'emails': raw[4],
+ 'size': raw[1]}
+ except (ValueError, AttributeError):
+ return None
diff --git a/collectors/python.d.plugin/postfix/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
new file mode 100644
index 00000000..a4d2472e
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/postfix.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for postfix
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# postfix is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, postfix also supports the following:
+#
+# command: 'postqueue -p' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'postqueue -p'
diff --git a/collectors/python.d.plugin/puppet/Makefile.inc b/collectors/python.d.plugin/puppet/Makefile.inc
new file mode 100644
index 00000000..fe94b925
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += puppet/puppet.chart.py
+dist_pythonconfig_DATA += puppet/puppet.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += puppet/README.md puppet/Makefile.inc
+
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
new file mode 120000
index 00000000..b6c4c83f
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -0,0 +1 @@
+integrations/puppet.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/puppet/integrations/puppet.md b/collectors/python.d.plugin/puppet/integrations/puppet.md
new file mode 100644
index 00000000..ca190b57
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/integrations/puppet.md
@@ -0,0 +1,215 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/puppet/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/puppet/metadata.yaml"
+sidebar_label: "Puppet"
+learn_status: "Published"
+learn_rel_path: "Data Collection/CICD Platforms"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Puppet
+
+
+<img src="https://netdata.cloud/img/puppet.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: puppet
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.
+
+
+It uses Puppet's metrics API endpoint to gather the metrics.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Puppet instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| puppet.jvm | committed, used | MiB |
+| puppet.jvm | committed, used | MiB |
+| puppet.cpu | execution, GC | percentage |
+| puppet.fdopen | used | descriptors |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/puppet.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/puppet.conf
+```
+#### Options
+
+This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+> Notes:
+> - The exact Fully Qualified Domain Name of the node should be used.
+> - Usually Puppet Server/DB startup time is VERY long, so a reasonable `autodetection_retry` value should be set.
+> - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| url | HTTP or HTTPS URL, exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8081 | yes |
+| tls_verify | Control HTTPS server certificate verification. | False | no |
+| tls_ca_file | Optional CA (bundle) file to use | | no |
+| tls_cert_file | Optional client certificate file | | no |
+| tls_key_file | Optional client key file | | no |
+| update_every | Sets the default data collection frequency. | 30 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration
+
+```yaml
+puppetserver:
+ url: 'https://fqdn.example.com:8140'
+ autodetection_retry: 1
+
+```
+##### TLS Certificate
+
+An example using a TLS certificate
+
+<details><summary>Config</summary>
+
+```yaml
+puppetdb:
+ url: 'https://fqdn.example.com:8081'
+ tls_cert_file: /path/to/client.crt
+ tls_key_file: /path/to/client.key
+ autodetection_retry: 1
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+puppetserver1:
+ url: 'https://fqdn.example.com:8140'
+ autodetection_retry: 1
+
+puppetserver2:
+ url: 'https://fqdn.example2.com:8140'
+ autodetection_retry: 1
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `puppet` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin puppet debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/puppet/metadata.yaml b/collectors/python.d.plugin/puppet/metadata.yaml
new file mode 100644
index 00000000..781519b6
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/metadata.yaml
@@ -0,0 +1,185 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: puppet
+ monitored_instance:
+ name: Puppet
+ link: "https://www.puppet.com/"
+ categories:
+ - data-collection.ci-cd-systems
+ icon_filename: "puppet.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - puppet
+ - jvm heap
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+        This collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.
+ method_description: |
+ It uses Puppet's metrics API endpoint to gather the metrics.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: By default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "python.d/puppet.conf"
+ options:
+ description: |
+            This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
+
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+ > Notes:
+            > - The exact Fully Qualified Domain Name of the node should be used.
+            > - Usually Puppet Server/DB startup time is VERY long, so a reasonable `autodetection_retry` value should be set.
+ > - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: url
+ description: HTTP or HTTPS URL, exact Fully Qualified Domain Name of the node should be used.
+ default_value: https://fqdn.example.com:8081
+ required: true
+ - name: tls_verify
+ description: Control HTTPS server certificate verification.
+ default_value: "False"
+ required: false
+ - name: tls_ca_file
+ description: Optional CA (bundle) file to use
+ default_value: ""
+ required: false
+ - name: tls_cert_file
+ description: Optional client certificate file
+ default_value: ""
+ required: false
+ - name: tls_key_file
+ description: Optional client key file
+ default_value: ""
+ required: false
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 30
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: >
+ Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic example configuration
+ folding:
+ enabled: false
+ config: |
+ puppetserver:
+ url: 'https://fqdn.example.com:8140'
+ autodetection_retry: 1
+ - name: TLS Certificate
+ description: An example using a TLS certificate
+ config: |
+ puppetdb:
+ url: 'https://fqdn.example.com:8081'
+ tls_cert_file: /path/to/client.crt
+ tls_key_file: /path/to/client.key
+ autodetection_retry: 1
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ puppetserver1:
+ url: 'https://fqdn.example.com:8140'
+ autodetection_retry: 1
+
+ puppetserver2:
+ url: 'https://fqdn.example2.com:8140'
+ autodetection_retry: 1
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: puppet.jvm
+ description: JVM Heap
+ unit: "MiB"
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: puppet.jvm
+ description: JVM Non-Heap
+ unit: "MiB"
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: puppet.cpu
+ description: CPU usage
+ unit: "percentage"
+ chart_type: stacked
+ dimensions:
+ - name: execution
+ - name: GC
+ - name: puppet.fdopen
+ description: File Descriptors
+ unit: "descriptors"
+ chart_type: line
+ dimensions:
+ - name: used
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
new file mode 100644
index 00000000..f8adf600
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Description: puppet netdata python.d module
+# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This module should work both with OpenSource and PE versions
+# of PuppetServer and PuppetDB.
+#
+# NOTE: PuppetDB may be configured to require proper TLS
+# client certificate for security reasons. Use tls_key_file
+# and tls_cert_file options then.
+#
+
+import socket
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+update_every = 5
+
+MiB = 1 << 20
+CPU_SCALE = 1000
+
+ORDER = [
+ 'jvm_heap',
+ 'jvm_nonheap',
+ 'cpu',
+ 'fd_open',
+]
+
+CHARTS = {
+ 'jvm_heap': {
+ 'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_heap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_heap_used', 'used', 'absolute', 1, MiB],
+ ],
+ 'variables': [
+ ['jvm_heap_max'],
+ ['jvm_heap_init'],
+ ],
+ },
+ 'jvm_nonheap': {
+ 'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
+ 'lines': [
+ ['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB],
+ ['jvm_nonheap_used', 'used', 'absolute', 1, MiB],
+ ],
+ 'variables': [
+ ['jvm_nonheap_max'],
+ ['jvm_nonheap_init'],
+ ],
+ },
+ 'cpu': {
+ 'options': [None, 'CPU usage', 'percentage', 'resources', 'puppet.cpu', 'stacked'],
+ 'lines': [
+ ['cpu_time', 'execution', 'absolute', 1, CPU_SCALE],
+ ['gc_time', 'GC', 'absolute', 1, CPU_SCALE],
+ ]
+ },
+ 'fd_open': {
+ 'options': [None, 'File Descriptors', 'descriptors', 'resources', 'puppet.fdopen', 'line'],
+ 'lines': [
+ ['fd_used', 'used', 'absolute'],
+ ],
+ 'variables': [
+ ['fd_max'],
+ ],
+ },
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = 'https://{0}:8140'.format(socket.getfqdn())
+
+ def _get_data(self):
+ # NOTE: there are several ways to retrieve data
+ # 1. Only PE versions:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
+ # 2. Individual Metrics API (JMX):
+ # https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
+ # 3. Extended status at debug level:
+ # https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
+ #
+        # For the sake of simplicity and efficiency, the status endpoint is used.
+
+ raw_data = self._get_raw_data(self.url + '/status/v1/services?level=debug')
+
+ if raw_data is None:
+ return None
+
+ raw_data = loads(raw_data)
+ data = {}
+
+ try:
+ try:
+ jvm_metrics = raw_data['status-service']['status']['experimental']['jvm-metrics']
+ except KeyError:
+ jvm_metrics = raw_data['status-service']['status']['jvm-metrics']
+
+ heap_mem = jvm_metrics['heap-memory']
+ non_heap_mem = jvm_metrics['non-heap-memory']
+
+ for k in ['max', 'committed', 'used', 'init']:
+ data['jvm_heap_' + k] = heap_mem[k]
+ data['jvm_nonheap_' + k] = non_heap_mem[k]
+
+ fd_open = jvm_metrics['file-descriptors']
+ data['fd_max'] = fd_open['max']
+ data['fd_used'] = fd_open['used']
+
+ data['cpu_time'] = int(jvm_metrics['cpu-usage'] * CPU_SCALE)
+ data['gc_time'] = int(jvm_metrics['gc-cpu-usage'] * CPU_SCALE)
+ except KeyError:
+ pass
+
+ return data or None
diff --git a/collectors/python.d.plugin/puppet/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf
new file mode 100644
index 00000000..ff5c3d02
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/puppet.conf
@@ -0,0 +1,94 @@
+# netdata python.d.plugin configuration for Puppet Server and Puppet DB
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# These configuration options come from the UrlService base:
+# url: # HTTP or HTTPS URL
+# tls_verify: False # Control HTTPS server certificate verification
+# tls_ca_file: # Optional CA (bundle) file to use
+# tls_cert_file: # Optional client certificate file
+# tls_key_file: # Optional client key file
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+# puppet:
+# url: 'https://<FQDN>:8140'
+#
+
+#
+# Production configuration should look like below.
+#
+# NOTE: usually Puppet Server/DB startup time is VERY long, so a reasonable
+#       autodetection_retry value should be set.
+#
+# NOTE: a secured PuppetDB config may require a client certificate.
+#       This does not apply to the default PuppetDB configuration though.
+#
+# puppetdb:
+# url: 'https://fqdn.example.com:8081'
+# tls_cert_file: /path/to/client.crt
+# tls_key_file: /path/to/client.key
+# autodetection_retry: 1
+#
+# puppetserver:
+# url: 'https://fqdn.example.com:8140'
+# autodetection_retry: 1
+#
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
new file mode 100644
index 00000000..3953ce2b
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.conf
@@ -0,0 +1,78 @@
+# netdata python.d.plugin configuration
+#
+# This file is in YaML format.
+# Generally the format is:
+#
+# name: value
+#
+
+# Enable / disable the whole python.d.plugin (all its modules)
+enabled: yes
+
+# ----------------------------------------------------------------------
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+
+# Enable / Disable explicit garbage collection (full collection run). Default is enabled.
+gc_run: yes
+
+# Garbage collection interval in seconds. Default is 300.
+gc_interval: 300
+
+# adaptec_raid: yes
+# alarms: yes
+# am2320: yes
+# anomalies: no
+# beanstalk: yes
+# bind_rndc: yes
+# boinc: yes
+# ceph: yes
+# changefinder: no
+# dovecot: yes
+
+# this is just an example
+example: no
+
+# exim: yes
+# fail2ban: yes
+# gearman: yes
+go_expvar: no
+
+# haproxy: yes
+# hddtemp: yes
+hpssa: no
+# icecast: yes
+# ipfs: yes
+# litespeed: yes
+# megacli: yes
+# memcached: yes
+# mongodb: yes
+# monit: yes
+# nvidia_smi: yes
+# nsd: yes
+# openldap: yes
+# oracledb: yes
+# pandas: yes
+# postfix: yes
+# puppet: yes
+# rethinkdbs: yes
+# retroshare: yes
+# riakkv: yes
+# samba: yes
+# sensors: yes
+# smartd_log: yes
+# spigotmc: yes
+# squid: yes
+# traefik: yes
+# tomcat: yes
+# tor: yes
+# uwsgi: yes
+# varnish: yes
+# w1sensor: yes
+# zscores: no
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
new file mode 100644
index 00000000..86fea209
--- /dev/null
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -0,0 +1,946 @@
+#!/usr/bin/env bash
+'''':;
+pybinary=$(which python3 || which python || which python2)
+filtered=()
+for arg in "$@"
+do
+ case $arg in
+ -p*) pybinary=${arg:2}
+ shift 1 ;;
+ *) filtered+=("$arg") ;;
+ esac
+done
+if [ "$pybinary" = "" ]
+then
+ echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM"
+ exit 1
+fi
+exec "$pybinary" "$0" "${filtered[@]}" # '''
+
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (l2isbad)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import collections
+import copy
+import gc
+import json
+import os
+import pprint
+import re
+import sys
+import threading
+import time
+import types
+
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+
+PY_VERSION = sys.version_info[:2] # (major=3, minor=7, micro=3, releaselevel='final', serial=0)
+
+if PY_VERSION > (3, 1):
+ from importlib.machinery import SourceFileLoader
+else:
+ from imp import load_source as SourceFileLoader
+
+ENV_NETDATA_USER_CONFIG_DIR = 'NETDATA_USER_CONFIG_DIR'
+ENV_NETDATA_STOCK_CONFIG_DIR = 'NETDATA_STOCK_CONFIG_DIR'
+ENV_NETDATA_PLUGINS_DIR = 'NETDATA_PLUGINS_DIR'
+ENV_NETDATA_USER_PLUGINS_DIRS = 'NETDATA_USER_PLUGINS_DIRS'
+ENV_NETDATA_LIB_DIR = 'NETDATA_LIB_DIR'
+ENV_NETDATA_UPDATE_EVERY = 'NETDATA_UPDATE_EVERY'
+ENV_NETDATA_LOCK_DIR = 'NETDATA_LOCK_DIR'
+
+
+def add_pythond_packages():
+ pluginsd = os.getenv(ENV_NETDATA_PLUGINS_DIR, os.path.dirname(__file__))
+ pythond = os.path.abspath(pluginsd + '/../python.d')
+ packages = os.path.join(pythond, 'python_modules')
+ sys.path.append(packages)
+
+
+add_pythond_packages()
+
+from bases.collection import safe_print
+from bases.loggers import PythonDLogger
+from bases.loaders import load_config
+from third_party import filelock
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+
+def dirs():
+ var_lib = os.getenv(
+ ENV_NETDATA_LIB_DIR,
+ '@varlibdir_POST@',
+ )
+ plugin_user_config = os.getenv(
+ ENV_NETDATA_USER_CONFIG_DIR,
+ '@configdir_POST@',
+ )
+ plugin_stock_config = os.getenv(
+ ENV_NETDATA_STOCK_CONFIG_DIR,
+ '@libconfigdir_POST@',
+ )
+ pluginsd = os.getenv(
+ ENV_NETDATA_PLUGINS_DIR,
+ os.path.dirname(__file__),
+ )
+ locks = os.getenv(
+ ENV_NETDATA_LOCK_DIR,
+ os.path.join('@varlibdir_POST@', 'lock')
+ )
+ modules_user_config = os.path.join(plugin_user_config, 'python.d')
+ modules_stock_config = os.path.join(plugin_stock_config, 'python.d')
+ modules = os.path.abspath(pluginsd + '/../python.d')
+ user_modules = [os.path.join(p, 'python.d') for p in
+ os.getenv(ENV_NETDATA_USER_PLUGINS_DIRS, "").split(" ") if
+ p]
+
+ Dirs = collections.namedtuple(
+ 'Dirs',
+ [
+ 'plugin_user_config',
+ 'plugin_stock_config',
+ 'modules_user_config',
+ 'modules_stock_config',
+ 'modules',
+ 'user_modules',
+ 'var_lib',
+ 'locks',
+ ]
+ )
+ return Dirs(
+ plugin_user_config,
+ plugin_stock_config,
+ modules_user_config,
+ modules_stock_config,
+ modules,
+ user_modules,
+ var_lib,
+ locks,
+ )
+
+
+DIRS = dirs()
+
+IS_ATTY = sys.stdout.isatty() or sys.stderr.isatty()
+
+MODULE_SUFFIX = '.chart.py'
+
+
+def find_available_modules(*directories):
+ AvailableModule = collections.namedtuple(
+ 'AvailableModule',
+ [
+ 'filepath',
+ 'name',
+ ]
+ )
+ available = list()
+ for d in directories:
+ try:
+ if not os.path.isdir(d):
+ continue
+ files = sorted(os.listdir(d))
+ except OSError:
+ continue
+ modules = [m for m in files if m.endswith(MODULE_SUFFIX)]
+ available.extend([AvailableModule(os.path.join(d, m), m[:-len(MODULE_SUFFIX)]) for m in modules])
+
+ return available
+
+
+def available_modules():
+ obsolete = (
+ 'apache_cache', # replaced by web_log
+ 'cpuidle', # rewritten in C
+ 'cpufreq', # rewritten in C
+ 'gunicorn_log', # replaced by web_log
+ 'linux_power_supply', # rewritten in C
+ 'nginx_log', # replaced by web_log
+ 'mdstat', # rewritten in C
+ 'sslcheck', # rewritten in Go, memory leak bug https://github.com/netdata/netdata/issues/5624
+ 'unbound', # rewritten in Go
+ )
+
+ stock = [m for m in find_available_modules(DIRS.modules) if m.name not in obsolete]
+ user = find_available_modules(*DIRS.user_modules)
+
+ available, seen = list(), set()
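+    # user modules are listed first, so they take precedence over stock modules with the same name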
+ for m in user + stock:
+ if m.name in seen:
+ continue
+ seen.add(m.name)
+ available.append(m)
+
+ return available
+
+
+AVAILABLE_MODULES = available_modules()
+
+JOB_BASE_CONF = {
+ 'update_every': int(os.getenv(ENV_NETDATA_UPDATE_EVERY, 1)),
+ 'priority': 60000,
+ 'autodetection_retry': 0,
+ 'chart_cleanup': 10,
+ 'penalty': True,
+ 'name': str(),
+}
+
+PLUGIN_BASE_CONF = {
+ 'enabled': True,
+ 'default_run': True,
+ 'gc_run': True,
+ 'gc_interval': 300,
+}
+
+
+def multi_path_find(name, *paths):
+ for path in paths:
+ abs_name = os.path.join(path, name)
+ if os.path.isfile(abs_name):
+ return abs_name
+ return str()
+
+
+def load_module(name, filepath):
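+    # on Python 2, SourceFileLoader is aliased to imp.load_source (see imports above), which returns a module directly;
+    # on Python 3, SourceFileLoader returns a loader, so load_module() must be called on it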
+ module = SourceFileLoader('pythond_' + name, filepath)
+ if isinstance(module, types.ModuleType):
+ return module
+ return module.load_module()
+
+
+class ModuleConfig:
+ def __init__(self, name, config=None):
+ self.name = name
+ self.config = config or OrderedDict()
+ self.is_stock = False
+
+ def load(self, abs_path):
+ if not IS_ATTY:
+ self.is_stock = abs_path.startswith(DIRS.modules_stock_config)
+ self.config.update(load_config(abs_path) or dict())
+
+ def defaults(self):
+ keys = (
+ 'update_every',
+ 'priority',
+ 'autodetection_retry',
+ 'chart_cleanup',
+ 'penalty',
+ )
+ return dict((k, self.config[k]) for k in keys if k in self.config)
+
+ def create_job(self, job_name, job_config=None):
+ job_config = job_config or dict()
+
+ config = OrderedDict()
+ config.update(job_config)
+ config['job_name'] = job_name
+ config['__is_stock'] = self.is_stock
+ for k, v in self.defaults().items():
+ config.setdefault(k, v)
+
+ return config
+
+ def job_names(self):
+ return [v for v in self.config if isinstance(self.config.get(v), dict)]
+
+ def single_job(self):
+ return [self.create_job(self.name, self.config)]
+
+ def multi_job(self):
+ return [self.create_job(n, self.config[n]) for n in self.job_names()]
+
+ def create_jobs(self):
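+        # if the config defines no per-job sections, treat the whole config as a single job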
+ return self.multi_job() or self.single_job()
+
+
+class JobsConfigsBuilder:
+ def __init__(self, config_dirs):
+ self.config_dirs = config_dirs
+ self.log = PythonDLogger()
+ self.job_defaults = None
+ self.module_defaults = None
+ self.min_update_every = None
+
+ def load_module_config(self, module_name):
+ name = '{0}.conf'.format(module_name)
+ self.log.debug("[{0}] looking for '{1}' in {2}".format(module_name, name, self.config_dirs))
+ config = ModuleConfig(module_name)
+
+ abs_path = multi_path_find(name, *self.config_dirs)
+ if not abs_path:
+ self.log.warning("[{0}] '{1}' was not found".format(module_name, name))
+ return config
+
+ self.log.debug("[{0}] loading '{1}'".format(module_name, abs_path))
+ try:
+ config.load(abs_path)
+ except Exception as error:
+ self.log.error("[{0}] error on loading '{1}' : {2}".format(module_name, abs_path, repr(error)))
+ return None
+
+ self.log.debug("[{0}] '{1}' is loaded".format(module_name, abs_path))
+ return config
+
+ @staticmethod
+ def apply_defaults(jobs, defaults):
+ if defaults is None:
+ return
+ for k, v in defaults.items():
+ for job in jobs:
+ job.setdefault(k, v)
+
+ def set_min_update_every(self, jobs, min_update_every):
+ if min_update_every is None:
+ return
+ for job in jobs:
+ if 'update_every' in job and job['update_every'] < min_update_every:
+ job['update_every'] = min_update_every
+
+ def build(self, module_name):
+ config = self.load_module_config(module_name)
+ if config is None:
+ return None
+
+ configs = config.create_jobs()
+ if not config.is_stock:
+ self.log.info("[{0}] built {1} job(s) configs".format(module_name, len(configs)))
+
+ self.apply_defaults(configs, self.module_defaults)
+ self.apply_defaults(configs, self.job_defaults)
+ self.set_min_update_every(configs, self.min_update_every)
+
+ return configs
+
+
+JOB_STATUS_ACTIVE = 'active'
+JOB_STATUS_RECOVERING = 'recovering'
+JOB_STATUS_DROPPED = 'dropped'
+JOB_STATUS_INIT = 'initial'
+
+
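+# One job: a Service instance of a module, run in its own daemon thread.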
+class Job(threading.Thread):
+ inf = -1
+
+ def __init__(self, service, module_name, config):
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.service = service
+ self.module_name = module_name
+ self.config = config
+ self.real_name = config['job_name']
+ self.actual_name = config['override_name'] or self.real_name
+ self.autodetection_retry = config['autodetection_retry']
+ self.checks = self.inf
+ self.job = None
+ self.is_stock = config.get('__is_stock', False)
+ self.status = JOB_STATUS_INIT
+
+ def is_inited(self):
+ return self.job is not None
+
+ def init(self):
+ self.job = self.service(configuration=copy.deepcopy(self.config))
+
+ def full_name(self):
+ return self.job.name
+
+ def check(self):
+ if self.is_stock:
+ self.job.logger.mute()
+
+ ok = self.job.check()
+
+ self.job.logger.unmute()
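+ # spend one remaining check only when the check failed and checks are not unlimited (self.inf)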
+ self.checks -= self.checks != self.inf and not ok
+
+ return ok
+
+ def create(self):
+ self.job.create()
+
+ def need_to_recheck(self):
+ return self.autodetection_retry != 0 and self.checks != 0
+
+ def run(self):
+ self.job.run()
+
+
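+# Loads a module's source file on demand and exposes its Service class and module-level defaults.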
+class ModuleSrc:
+ def __init__(self, m):
+ self.name = m.name
+ self.filepath = m.filepath
+ self.src = None
+
+ def load(self):
+ self.src = load_module(self.name, self.filepath)
+
+ def get(self, key):
+ return getattr(self.src, key, None)
+
+ def service(self):
+ return self.get('Service')
+
+ def defaults(self):
+ keys = (
+ 'update_every',
+ 'priority',
+ 'autodetection_retry',
+ 'chart_cleanup',
+ 'penalty',
+ )
+ return dict((k, self.get(k)) for k in keys if self.get(k) is not None)
+
+ def is_disabled_by_default(self):
+ return bool(self.get('disabled_by_default'))
+
+
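+# Snapshot of job statuses, serializable to JSON, used to restore previously active jobs after a plugin restart.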
+class JobsStatuses:
+ def __init__(self):
+ self.items = OrderedDict()
+
+ def dump(self):
+ return json.dumps(self.items, indent=2)
+
+ def get(self, module_name, job_name):
+ if module_name not in self.items:
+ return None
+ return self.items[module_name].get(job_name)
+
+ def has(self, module_name, job_name):
+ return self.get(module_name, job_name) is not None
+
+ def from_file(self, path):
+ with open(path) as f:
+ data = json.load(f)
+ return self.from_json(data)
+
+ @staticmethod
+ def from_json(items):
+ if not isinstance(items, dict):
+ raise Exception('items obj has wrong type : {0}'.format(type(items)))
+ if not items:
+ return JobsStatuses()
+
+ v = OrderedDict()
+ for mod_name in sorted(items):
+ if not items[mod_name]:
+ continue
+ v[mod_name] = OrderedDict()
+ for job_name in sorted(items[mod_name]):
+ v[mod_name][job_name] = items[mod_name][job_name]
+
+ rv = JobsStatuses()
+ rv.items = v
+ return rv
+
+ @staticmethod
+ def from_jobs(jobs):
+ v = OrderedDict()
+ for job in jobs:
+ status = job.status
+ if status not in (JOB_STATUS_ACTIVE, JOB_STATUS_RECOVERING):
+ continue
+ if job.module_name not in v:
+ v[job.module_name] = OrderedDict()
+ v[job.module_name][job.real_name] = status
+
+ rv = JobsStatuses()
+ rv.items = v
+ return rv
+
+
+class StdoutSaver:
+ @staticmethod
+ def save(dump):
+ print(dump)
+
+
+class CachedFileSaver:
+ def __init__(self, path):
+ self.last_save_success = False
+ self.last_saved_dump = str()
+ self.path = path
+
+ def save(self, dump):
+ if self.last_save_success and self.last_saved_dump == dump:
+ return
+ try:
+ with open(self.path, 'w') as out:
+ out.write(dump)
+ except Exception:
+ self.last_save_success = False
+ raise
+ self.last_saved_dump = dump
+ self.last_save_success = True
+
+
+class PluginConfig(dict):
+ def __init__(self, *args):
+ dict.__init__(self, *args)
+
+ def is_module_explicitly_enabled(self, module_name):
+ return self._is_module_enabled(module_name, True)
+
+ def is_module_enabled(self, module_name):
+ return self._is_module_enabled(module_name, False)
+
+ def _is_module_enabled(self, module_name, explicit):
+ if module_name in self:
+ return self[module_name]
+ if explicit:
+ return False
+ return self['default_run']
+
+
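+# Per-job lock files in a shared directory, used so the same job is not collected by more than one process.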
+class FileLockRegistry:
+ def __init__(self, path):
+ self.path = path
+ self.locks = dict()
+
+ @staticmethod
+ def rename(name):
+ # the go.d version of this collector is named 'docker', so use the same lock name
+ if name.startswith("dockerd"):
+ name = "docker" + name[7:]
+ return name
+
+ def register(self, name):
+ name = self.rename(name)
+ if name in self.locks:
+ return
+ file = os.path.join(self.path, '{0}.collector.lock'.format(name))
+ lock = filelock.FileLock(file)
+ lock.acquire(timeout=0)
+ self.locks[name] = lock
+
+ def unregister(self, name):
+ name = self.rename(name)
+ if name not in self.locks:
+ return
+ lock = self.locks[name]
+ lock.release()
+ del self.locks[name]
+
+
+class DummyRegistry:
+ def register(self, name):
+ pass
+
+ def unregister(self, name):
+ pass
+
+
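+# Top-level orchestrator: loads python.d.conf, builds and starts jobs, then serves them until none are left alive.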
+class Plugin:
+ config_name = 'python.d.conf'
+ jobs_status_dump_name = 'pythond-jobs-statuses.json'
+
+ def __init__(self, modules_to_run, min_update_every, registry):
+ self.modules_to_run = modules_to_run
+ self.min_update_every = min_update_every
+ self.config = PluginConfig(PLUGIN_BASE_CONF)
+ self.log = PythonDLogger()
+ self.registry = registry
+ self.started_jobs = collections.defaultdict(dict)
+ self.jobs = list()
+ self.saver = None
+ self.runs = 0
+
+ def load_config_file(self, filepath, expected):
+ self.log.debug("looking for '{0}'".format(filepath))
+ if not os.path.isfile(filepath):
+ log = self.log.info if not expected else self.log.error
+ log("'{0}' was not found".format(filepath))
+ return dict()
+ try:
+ config = load_config(filepath)
+ except Exception as error:
+ self.log.error("error on loading '{0}' : {1}".format(filepath, repr(error)))
+ return dict()
+ self.log.debug("'{0}' is loaded".format(filepath))
+ return config
+
+ def load_config(self):
+ user_config = self.load_config_file(
+ filepath=os.path.join(DIRS.plugin_user_config, self.config_name),
+ expected=False,
+ )
+ stock_config = self.load_config_file(
+ filepath=os.path.join(DIRS.plugin_stock_config, self.config_name),
+ expected=True,
+ )
+ self.config.update(stock_config)
+ self.config.update(user_config)
+
+ def load_job_statuses(self):
+ self.log.debug("looking for '{0}' in {1}".format(self.jobs_status_dump_name, DIRS.var_lib))
+ abs_path = multi_path_find(self.jobs_status_dump_name, DIRS.var_lib)
+ if not abs_path:
+ self.log.warning("'{0}' was not found".format(self.jobs_status_dump_name))
+ return
+
+ self.log.debug("loading '{0}'".format(abs_path))
+ try:
+ statuses = JobsStatuses().from_file(abs_path)
+ except Exception as error:
+ self.log.error("'{0}' invalid JSON format: {1}".format(
+ abs_path, ' '.join([v.strip() for v in str(error).split('\n')])))
+ return None
+ self.log.debug("'{0}' is loaded".format(abs_path))
+ return statuses
+
+ def create_jobs(self, job_statuses=None):
+ paths = [
+ DIRS.modules_user_config,
+ DIRS.modules_stock_config,
+ ]
+
+ builder = JobsConfigsBuilder(paths)
+ builder.job_defaults = JOB_BASE_CONF
+ builder.min_update_every = self.min_update_every
+
+ jobs = list()
+ for m in self.modules_to_run:
+ if not self.config.is_module_enabled(m.name):
+ self.log.info("[{0}] is disabled in the configuration file, skipping it".format(m.name))
+ continue
+
+ src = ModuleSrc(m)
+ try:
+ src.load()
+ except Exception as error:
+ self.log.warning("[{0}] error on loading source : {1}, skipping it".format(m.name, repr(error)))
+ continue
+ self.log.debug("[{0}] loaded module source : '{1}'".format(m.name, m.filepath))
+
+ if not (src.service() and callable(src.service())):
+ self.log.warning("[{0}] has no callable Service object, skipping it".format(m.name))
+ continue
+
+ if src.is_disabled_by_default() and not self.config.is_module_explicitly_enabled(m.name):
+ self.log.info("[{0}] is disabled by default, skipping it".format(m.name))
+ continue
+
+ builder.module_defaults = src.defaults()
+ configs = builder.build(m.name)
+ if not configs:
+ self.log.info("[{0}] has no job configs, skipping it".format(m.name))
+ continue
+
+ for config in configs:
+ config['job_name'] = re.sub(r'\s+', '_', config['job_name'])
+ config['override_name'] = re.sub(r'\s+', '_', config.pop('name'))
+
+ job = Job(src.service(), m.name, config)
+
+ was_previously_active = job_statuses and job_statuses.has(job.module_name, job.real_name)
+ if was_previously_active and job.autodetection_retry == 0:
+ self.log.debug('{0}[{1}] was previously active, applying recovering settings'.format(
+ job.module_name, job.real_name))
+ job.checks = 11
+ job.autodetection_retry = 30
+
+ jobs.append(job)
+
+ return jobs
+
+ def setup(self):
+ self.load_config()
+
+ if not self.config['enabled']:
+ self.log.info('disabled in the configuration file')
+ return False
+
+ statuses = self.load_job_statuses()
+
+ self.jobs = self.create_jobs(statuses)
+ if not self.jobs:
+ self.log.info('no jobs to run')
+ return False
+
+ if not IS_ATTY:
+ abs_path = os.path.join(DIRS.var_lib, self.jobs_status_dump_name)
+ self.saver = CachedFileSaver(abs_path)
+ return True
+
+ def start_jobs(self, *jobs):
+ for job in jobs:
+ if job.status not in (JOB_STATUS_INIT, JOB_STATUS_RECOVERING):
+ continue
+
+ if job.actual_name in self.started_jobs[job.module_name]:
+ self.log.info('{0}[{1}] : already served by another job, skipping it'.format(
+ job.module_name, job.real_name))
+ job.status = JOB_STATUS_DROPPED
+ continue
+
+ if not job.is_inited():
+ try:
+ job.init()
+ except Exception as error:
+ self.log.warning("{0}[{1}] : unhandled exception on init : {2}, skipping the job".format(
+ job.module_name, job.real_name, repr(error)))
+ job.status = JOB_STATUS_DROPPED
+ continue
+
+ try:
+ ok = job.check()
+ except Exception as error:
+ if not job.is_stock:
+ self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
+ job.module_name, job.real_name, repr(error)))
+ job.status = JOB_STATUS_DROPPED
+ continue
+ if not ok:
+ if not job.is_stock:
+ self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.real_name))
+ job.status = JOB_STATUS_RECOVERING if job.need_to_recheck() else JOB_STATUS_DROPPED
+ continue
+ self.log.info('{0}[{1}] : check success'.format(job.module_name, job.real_name))
+
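+ # claim the job name in the lock registry so another process cannot collect the same job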
+ try:
+ self.registry.register(job.full_name())
+ except filelock.Timeout as error:
+ self.log.info('{0}[{1}] : already registered by another process, skipping the job ({2})'.format(
+ job.module_name, job.real_name, error))
+ job.status = JOB_STATUS_DROPPED
+ continue
+ except Exception as error:
+ self.log.warning('{0}[{1}] : registration failed: {2}, skipping the job'.format(
+ job.module_name, job.real_name, error))
+ job.status = JOB_STATUS_DROPPED
+ continue
+
+ try:
+ job.create()
+ except Exception as error:
+ self.log.warning("{0}[{1}] : unhandled exception on create : {2}, skipping the job".format(
+ job.module_name, job.real_name, repr(error)))
+ job.status = JOB_STATUS_DROPPED
+ try:
+ self.registry.unregister(job.full_name())
+ except Exception as error:
+ self.log.warning('{0}[{1}] : deregistration failed: {2}'.format(
+ job.module_name, job.real_name, error))
+ continue
+
+ self.started_jobs[job.module_name][job.actual_name] = True  # track served names per module
+ job.status = JOB_STATUS_ACTIVE
+ job.start()
+
+ @staticmethod
+ def keep_alive():
+ if not IS_ATTY:
+ safe_print('\n')
+
+ def garbage_collection(self):
+ if self.config['gc_run'] and self.runs % self.config['gc_interval'] == 0:
+ v = gc.collect()
+ self.log.debug('GC collection run result: {0}'.format(v))
+
+ def restart_recovering_jobs(self):
+ for job in self.jobs:
+ if job.status != JOB_STATUS_RECOVERING:
+ continue
+ if self.runs % job.autodetection_retry != 0:
+ continue
+ self.start_jobs(job)
+
+ def cleanup_jobs(self):
+ self.jobs = [j for j in self.jobs if j.status != JOB_STATUS_DROPPED]
+
+ def have_alive_jobs(self):
+ return next(
+ (True for job in self.jobs if job.status in (JOB_STATUS_RECOVERING, JOB_STATUS_ACTIVE)),
+ False,
+ )
+
+ def save_job_statuses(self):
+ if self.saver is None:
+ return
+ if self.runs % 10 != 0:
+ return
+ dump = JobsStatuses().from_jobs(self.jobs).dump()
+ try:
+ self.saver.save(dump)
+ except Exception as error:
+ self.log.error("error on saving jobs statuses dump : {0}".format(repr(error)))
+
+ def serve_once(self):
+ if not self.have_alive_jobs():
+ self.log.info('no jobs to serve')
+ return False
+
+ time.sleep(1)
+ self.runs += 1
+
+ self.keep_alive()
+ self.garbage_collection()
+ self.cleanup_jobs()
+ self.restart_recovering_jobs()
+ self.save_job_statuses()
+ return True
+
+ def serve(self):
+ while self.serve_once():
+ pass
+
+ def run(self):
+ self.start_jobs(*self.jobs)
+ self.serve()
+
+
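+# Parse plugin arguments: the first positive integer is update_every, 'debug', 'trace' and 'nolock' are flags, anything left is treated as module names.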
+def parse_command_line():
+ opts = sys.argv[:][1:]
+
+ debug = False
+ trace = False
+ nolock = False
+ update_every = 1
+ modules_to_run = list()
+
+ def find_first_positive_int(values):
+ return next((v for v in values if v.isdigit() and int(v) >= 1), None)
+
+ u = find_first_positive_int(opts)
+ if u is not None:
+ update_every = int(u)
+ opts.remove(u)
+ if 'debug' in opts:
+ debug = True
+ opts.remove('debug')
+ if 'trace' in opts:
+ trace = True
+ opts.remove('trace')
+ if 'nolock' in opts:
+ nolock = True
+ opts.remove('nolock')
+ if opts:
+ modules_to_run = list(opts)
+
+ cmd = collections.namedtuple(
+ 'CMD',
+ [
+ 'update_every',
+ 'debug',
+ 'trace',
+ 'nolock',
+ 'modules_to_run',
+ ])
+ return cmd(
+ update_every,
+ debug,
+ trace,
+ nolock,
+ modules_to_run,
+ )
+
+
+def guess_module(modules, *names):
+ def guess(n):
+ found = None
+ for i, _ in enumerate(n):
+ cur = [x for x in modules if x.startswith(n[:i + 1])]
+ if not cur:
+ return found
+ found = cur
+ return found
+
+ guessed = list()
+ for name in names:
+ name = name.lower()
+ m = guess(name)
+ if m:
+ guessed.extend(m)
+ return sorted(set(guessed))
+
+
+def disable():
+ if not IS_ATTY:
+ safe_print('DISABLE')
+ exit(0)
+
+
+def get_modules_to_run(cmd):
+ if not cmd.modules_to_run:
+ return AVAILABLE_MODULES
+
+ modules_to_run, seen = list(), set()
+ for m in AVAILABLE_MODULES:
+ if m.name not in cmd.modules_to_run or m.name in seen:
+ continue
+ seen.add(m.name)
+ modules_to_run.append(m)
+
+ return modules_to_run
+
+
+def main():
+ cmd = parse_command_line()
+ log = PythonDLogger()
+
+ level = os.getenv('NETDATA_LOG_LEVEL') or str()
+ level = level.lower()
+ if level == 'debug':
+ log.logger.severity = 'DEBUG'
+ elif level == 'info':
+ log.logger.severity = 'INFO'
+ elif level == 'warn' or level == 'warning':
+ log.logger.severity = 'WARNING'
+ elif level == 'err' or level == 'error':
+ log.logger.severity = 'ERROR'
+
+ if cmd.debug:
+ log.logger.severity = 'DEBUG'
+ if cmd.trace:
+ log.log_traceback = True
+
+ log.info('using python v{0}'.format(PY_VERSION[0]))
+
+ if DIRS.locks and not cmd.nolock:
+ registry = FileLockRegistry(DIRS.locks)
+ else:
+ registry = DummyRegistry()
+
+ unique_avail_module_names = set([m.name for m in AVAILABLE_MODULES])
+ unknown = set(cmd.modules_to_run) - unique_avail_module_names
+ if unknown:
+ log.error('unknown modules : {0}'.format(sorted(list(unknown))))
+ guessed = guess_module(unique_avail_module_names, *cmd.modules_to_run)
+ if guessed:
+ log.info('probably you meant : \n{0}'.format(pprint.pformat(guessed, width=1)))
+ return
+
+ p = Plugin(
+ get_modules_to_run(cmd),
+ cmd.update_every,
+ registry,
+ )
+
+ # cheap attempt to reduce chance of python.d job running before go.d
+ # TODO: better implementation needed
+ if not IS_ATTY:
+ time.sleep(1.5)
+
+ try:
+ if not p.setup():
+ return
+ p.run()
+ except KeyboardInterrupt:
+ pass
+ log.info('exiting from main...')
+
+
+if __name__ == "__main__":
+ main()
+ disable()
diff --git a/collectors/python.d.plugin/python_modules/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
new file mode 100644
index 00000000..a74b4239
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from subprocess import Popen, PIPE
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import find_binary
+
+
+class ExecutableService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.command = None
+
+ def _get_raw_data(self, stderr=False, command=None):
+ """
+ Get raw data from executed command
+ :return: <list>
+ """
+ command = command or self.command
+ self.debug("Executing command '{0}'".format(' '.join(command)))
+ try:
+ p = Popen(command, stdout=PIPE, stderr=PIPE)
+ except Exception as error:
+ self.error('Executing command {0} resulted in error: {1}'.format(command, error))
+ return None
+
+ data = list()
+ std = p.stderr if stderr else p.stdout
+ for line in std:
+ try:
+ data.append(line.decode('utf-8'))
+ except (TypeError, UnicodeDecodeError):
+ continue
+
+ return data
+
+ def check(self):
+ """
+ Parse basic configuration, check if command is whitelisted and is returning values
+ :return: <boolean>
+ """
+ # Preference: 1. "command" from configuration file 2. "command" from plugin (if specified)
+ if 'command' in self.configuration:
+ self.command = self.configuration['command']
+
+ # "command" must be: 1.not None 2. type <str>
+ if not (self.command and isinstance(self.command, str)):
+ self.error('Command is not defined or command type is not <str>')
+ return False
+
+ # Split "command" into: 1. command <str> 2. options <list>
+ command, opts = self.command.split()[0], self.command.split()[1:]
+
+ # Check for "bad" symbols in options. No pipes, redirects etc.
+ opts_list = ['&', '|', ';', '>', '<']
+ bad_opts = set(''.join(opts)) & set(opts_list)
+ if bad_opts:
+ self.error("Bad command argument(s): {opts}".format(opts=bad_opts))
+ return False
+
+ # Find absolute path ('echo' => '/bin/echo')
+ if '/' not in command:
+ command = find_binary(command)
+ if not command:
+ self.error('Can\'t locate "{command}" binary'.format(command=self.command))
+ return False
+ # Check if binary exist and executable
+ else:
+ if not os.access(command, os.X_OK):
+ self.error('"{binary}" is not executable'.format(binary=command))
+ return False
+
+ self.command = [command] + opts if opts else [command]
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Command: {command}. Error: {error}'.format(command=self.command,
+ error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error('Command "{command}" returned no data'.format(command=self.command))
+ return False
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
new file mode 100644
index 00000000..a55e33f5
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from glob import glob
+import sys
+import os
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class LogService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.log_path = self.configuration.get('path')
+ self.__glob_path = self.log_path
+ self._last_position = 0
+ self.__re_find = dict(current=0, run=0, maximum=60)
+ self.__open_args = {'errors': 'replace'} if sys.version_info[0] > 2 else {}
+
+ def _get_raw_data(self):
+ """
+ Get log lines since last poll
+ :return: list
+ """
+ lines = list()
+ try:
+ if self.__re_find['current'] == self.__re_find['run']:
+ self._find_recent_log_file()
+ size = os.path.getsize(self.log_path)
+ if size == self._last_position:
+ self.__re_find['current'] += 1
+ return list() # return empty list if nothing has changed
+ elif size < self._last_position:
+ self._last_position = 0 # read from beginning if file has shrunk
+
+ with open(self.log_path, **self.__open_args) as fp:
+ fp.seek(self._last_position)
+ for line in fp:
+ lines.append(line)
+ self._last_position = fp.tell()
+ self.__re_find['current'] = 0
+ except (OSError, IOError) as error:
+ self.__re_find['current'] += 1
+ self.error(str(error))
+
+ return lines or None
+
+ def _find_recent_log_file(self):
+ """
+ Pick the most recent file matching the glob path as the log file
+ :return: boolean
+ """
+ self.__re_find['run'] = self.__re_find['maximum']
+ self.__re_find['current'] = 0
+ self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files
+ path_list = glob(self.__glob_path)
+ if path_list:
+ self.log_path = max(path_list)
+ return True
+ return False
+
+ def check(self):
+ """
+ Parse basic configuration and check if log file exists
+ :return: boolean
+ """
+ if not self.log_path:
+ self.error('No path to log specified')
+ return False
+
+ if self._find_recent_log_file() and os.access(self.log_path, os.R_OK) and os.path.isfile(self.log_path):
+ return True
+ self.error('Cannot access {0}'.format(self.log_path))
+ return False
+
+ def create(self):
+ # set cursor at last byte of log file
+ self._last_position = os.path.getsize(self.log_path)
+ status = SimpleService.create(self)
+ return status
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
new file mode 100644
index 00000000..7f5c7d22
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from sys import exc_info
+
+try:
+ import MySQLdb
+
+ PY_MYSQL = True
+except ImportError:
+ try:
+ import pymysql as MySQLdb
+
+ PY_MYSQL = True
+ except ImportError:
+ PY_MYSQL = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+class MySQLService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.__connection = None
+ self.__conn_properties = dict()
+ self.extra_conn_properties = dict()
+ self.__queries = self.configuration.get('queries', dict())
+ self.queries = dict()
+
+ def __connect(self):
+ try:
+ connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties)
+ except (MySQLdb.MySQLError, TypeError, AttributeError) as error:
+ return None, str(error)
+ else:
+ return connection, None
+
+ def check(self):
+ def get_connection_properties(conf, extra_conf):
+ properties = dict()
+ if conf.get('user'):
+ properties['user'] = conf['user']
+ if conf.get('pass'):
+ properties['passwd'] = conf['pass']
+
+ if conf.get('socket'):
+ properties['unix_socket'] = conf['socket']
+ elif conf.get('host'):
+ properties['host'] = conf['host']
+ properties['port'] = int(conf.get('port', 3306))
+ elif conf.get('my.cnf'):
+ properties['read_default_file'] = conf['my.cnf']
+
+ if conf.get('ssl'):
+ properties['ssl'] = conf['ssl']
+
+ if isinstance(extra_conf, dict) and extra_conf:
+ properties.update(extra_conf)
+
+ return properties or None
+
+ def is_valid_queries_dict(raw_queries, log_error):
+ """
+ :param raw_queries: dict:
+ :param log_error: function:
+ :return: dict or None
+
+ raw_queries is valid when it is a non-empty <dict>; queries that fail is_valid_query() are removed
+ """
+
+ def is_valid_query(query):
+ return all([isinstance(query, str),
+ query.startswith(('SELECT', 'select', 'SHOW', 'show'))])
+
+ if hasattr(raw_queries, 'keys') and raw_queries:
+ valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)])
+ bad_queries = set(raw_queries) - set(valid_queries)
+
+ if bad_queries:
+ log_error('Removed query(s): {queries}'.format(queries=bad_queries))
+ return valid_queries
+ else:
+ log_error('Unsupported "queries" format. Must be a non-empty <dict>')
+ return None
+
+ if not PY_MYSQL:
+ self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin')
+ return False
+
+ # Preference: 1. "queries" from the configuration file 2. "queries" from the module
+ self.queries = self.__queries or self.queries
+ # Check if "self.queries" exist, not empty and all queries are in valid format
+ self.queries = is_valid_queries_dict(self.queries, self.error)
+ if not self.queries:
+ return False
+
+ # Get connection properties
+ self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties)
+ if not self.__conn_properties:
+ self.error('Connection properties are missing')
+ return False
+
+ # Create connection to the database
+ self.__connection, error = self.__connect()
+ if error:
+ self.error('Can\'t establish connection to MySQL: {error}'.format(error=error))
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Error: {error}'.format(error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error("_get_data() returned no data or type is not <dict>")
+ return False
+
+ def _get_raw_data(self, description=None):
+ """
+ Get raw data from MySQL server
+ :return: dict: fetchall() or (fetchall(), description)
+ """
+
+ if not self.__connection:
+ self.__connection, error = self.__connect()
+ if error:
+ return None
+
+ raw_data = dict()
+ queries = dict(self.queries)
+ try:
+ cursor = self.__connection.cursor()
+ for name, query in queries.items():
+ try:
+ cursor.execute(query)
+ except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
+ if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
+ cursor.close()
+ raise RuntimeError
+ self.error('Removed query: {name}[{query}]. Error: {error}'.format(name=name,
+ query=query,
+ error=error))
+ self.queries.pop(name)
+ continue
+ else:
+ raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ cursor.close()
+ self.__connection.commit()
+ except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
+ self.__connection.close()
+ self.__connection = None
+ return None
+ else:
+ return raw_data or None
+
+ @staticmethod
+ def __is_error_critical(err_class, err_text):
+ return err_class == MySQLdb.OperationalError and all(['denied' not in err_text,
+ 'Unknown column' not in err_text])
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
new file mode 100644
index 00000000..3f122e1d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -0,0 +1,261 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from bases.charts import Charts, ChartError, create_runtime_chart
+from bases.collection import safe_print
+from bases.loggers import PythonDLogger
+from third_party.monotonic import monotonic
+from time import sleep, time
+
+RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \
+ 'SET run_time = {elapsed}\n' \
+ 'END\n'
+
+PENALTY_EVERY = 5
+MAX_PENALTY = 10 * 60 # 10 minutes
+
+ND_INTERNAL_MONITORING_DISABLED = os.getenv("NETDATA_INTERNALS_MONITORING") == "NO"
+
+
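+# Per-job scheduling state: update interval, retry/penalty bookkeeping and timestamps of the last run.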
+class RuntimeCounters:
+ def __init__(self, configuration):
+ """
+ :param configuration: <dict>
+ """
+ self.update_every = int(configuration.pop('update_every'))
+ self.do_penalty = configuration.pop('penalty')
+
+ self.start_mono = 0
+ self.start_real = 0
+ self.retries = 0
+ self.penalty = 0
+ self.elapsed = 0
+ self.prev_update = 0
+
+ self.runs = 1
+
+ def calc_next(self):
+ self.start_mono = monotonic()
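+ # align the next run to the update_every grid and add any accumulated retry penalty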
+ return self.start_mono - (self.start_mono % self.update_every) + self.update_every + self.penalty
+
+ def sleep_until_next(self):
+ next_time = self.calc_next()
+ while self.start_mono < next_time:
+ sleep(next_time - self.start_mono)
+ self.start_mono = monotonic()
+ self.start_real = time()
+
+ def handle_retries(self):
+ self.retries += 1
+ if self.do_penalty and self.retries % PENALTY_EVERY == 0:
+ self.penalty = round(min(self.retries * self.update_every / 2, MAX_PENALTY))
+
+
+def clean_module_name(name):
+ if name.startswith('pythond_'):
+ return name[8:]
+ return name
+
+
+class SimpleService(PythonDLogger, object):
+ """
+ Prototype of Service class.
+ Implemented basic functionality to run jobs by `python.d.plugin`
+ """
+
+ def __init__(self, configuration, name=''):
+ """
+ :param configuration: <dict>
+ :param name: <str>
+ """
+ PythonDLogger.__init__(self)
+ self.configuration = configuration
+ self.order = list()
+ self.definitions = dict()
+
+ self.module_name = clean_module_name(self.__module__)
+ self.job_name = configuration.pop('job_name')
+ self.actual_job_name = self.job_name or self.module_name
+ self.override_name = configuration.pop('override_name')
+ self.fake_name = None
+
+ self._runtime_counters = RuntimeCounters(configuration=configuration)
+ self.charts = Charts(job_name=self.actual_name,
+ actual_job_name=self.actual_job_name,
+ priority=configuration.pop('priority'),
+ cleanup=configuration.pop('chart_cleanup'),
+ get_update_every=self.get_update_every,
+ module_name=self.module_name)
+
+ def __repr__(self):
+ return '<{cls_bases}: {name}>'.format(cls_bases=', '.join(c.__name__ for c in self.__class__.__bases__),
+ name=self.name)
+
+ @property
+ def name(self):
+ name = self.override_name or self.job_name
+ if name and name != self.module_name:
+ return '_'.join([self.module_name, name])
+ return self.module_name
+
+ def actual_name(self):
+ return self.fake_name or self.name
+
+ @property
+ def runs_counter(self):
+ return self._runtime_counters.runs
+
+ @property
+ def update_every(self):
+ return self._runtime_counters.update_every
+
+ @update_every.setter
+ def update_every(self, value):
+ """
+ :param value: <int>
+ :return:
+ """
+ self._runtime_counters.update_every = value
+
+ def get_update_every(self):
+ return self.update_every
+
+ def check(self):
+ """
+ check() prototype
+ :return: boolean
+ """
+ self.debug("job doesn't implement check() method. Using default which simply invokes get_data().")
+ data = self.get_data()
+ if data and isinstance(data, dict):
+ return True
+ self.debug('returned value is wrong: {0}'.format(data))
+ return False
+
+ @create_runtime_chart
+ def create(self):
+ for chart_name in self.order:
+ chart_config = self.definitions.get(chart_name)
+
+ if not chart_config:
+ self.debug("create() => [NOT ADDED] chart '{chart_name}' not in definitions. "
+ "Skipping it.".format(chart_name=chart_name))
+ continue
+
+ # create chart
+ chart_params = [chart_name] + chart_config['options']
+ try:
+ self.charts.add_chart(params=chart_params)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (chart '{chart}': {error})".format(chart=chart_name,
+ error=error))
+ continue
+
+ # add dimensions to chart
+ for dimension in chart_config['lines']:
+ try:
+ self.charts[chart_name].add_dimension(dimension)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (dimension '{dimension}': {error})".format(dimension=dimension,
+ error=error))
+ continue
+
+ # add variables to chart
+ if 'variables' in chart_config:
+ for variable in chart_config['variables']:
+ try:
+ self.charts[chart_name].add_variable(variable)
+ except ChartError as error:
+ self.error("create() => [NOT ADDED] (variable '{var}': {error})".format(var=variable,
+ error=error))
+ continue
+
+ del self.order
+ del self.definitions
+
+ # True if job has at least 1 chart else False
+ return bool(self.charts)
+
+ def run(self):
+ """
+ Runs job in thread. Handles retries.
+ Exits when job failed or timed out.
+ :return: None
+ """
+ job = self._runtime_counters
+ self.debug('started, update frequency: {freq}'.format(freq=job.update_every))
+
+ while True:
+ job.sleep_until_next()
+
+ since = 0
+ if job.prev_update:
+ since = int((job.start_real - job.prev_update) * 1e6)
+
+ try:
+ updated = self.update(interval=since)
+ except Exception as error:
+ self.error('update() unhandled exception: {error}'.format(error=error))
+ updated = False
+
+ job.runs += 1
+
+ if not updated:
+ job.handle_retries()
+ else:
+ job.elapsed = int((monotonic() - job.start_mono) * 1e3)
+ job.prev_update = job.start_real
+ job.retries, job.penalty = 0, 0
+ if not ND_INTERNAL_MONITORING_DISABLED:
+ safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
+ since_last=since,
+ elapsed=job.elapsed))
+ self.debug('update => [{status}] (elapsed time: {elapsed}, failed retries in a row: {retries})'.format(
+ status='OK' if updated else 'FAILED',
+ elapsed=job.elapsed if updated else '-',
+ retries=job.retries))
+
+ def update(self, interval):
+ """
+ :return:
+ """
+ data = self.get_data()
+ if not data:
+ self.debug('get_data() returned no data')
+ return False
+ elif not isinstance(data, dict):
+ self.debug('get_data() returned incorrect type data')
+ return False
+
+ updated = False
+
+ for chart in self.charts:
+ if chart.flags.obsoleted:
+ if chart.can_be_updated(data):
+ chart.refresh()
+ else:
+ continue
+ elif self.charts.cleanup and chart.penalty >= self.charts.cleanup:
+ chart.obsolete()
+ self.info("chart '{0}' was suppressed due to non updating".format(chart.name))
+ continue
+
+ ok = chart.update(data, interval)
+ if ok:
+ updated = True
+
+ if not updated:
+ self.debug('none of the charts has been updated')
+
+ return updated
+
+ def get_data(self):
+ return self._get_data()
+
+ def _get_data(self):
+ raise NotImplementedError
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
new file mode 100644
index 00000000..d6c75505
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
@@ -0,0 +1,336 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import errno
+import socket
+
+try:
+ import ssl
+except ImportError:
+ _TLS_SUPPORT = False
+else:
+ _TLS_SUPPORT = True
+
+if _TLS_SUPPORT:
+ try:
+ PROTOCOL_TLS = ssl.PROTOCOL_TLS
+ except AttributeError:
+ PROTOCOL_TLS = ssl.PROTOCOL_SSLv23
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+
+DEFAULT_CONNECT_TIMEOUT = 2.0
+DEFAULT_READ_TIMEOUT = 2.0
+DEFAULT_WRITE_TIMEOUT = 2.0
+
+
+class SocketService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ self._sock = None
+ self._keep_alive = False
+ self.host = 'localhost'
+ self.port = None
+ self.unix_socket = None
+ self.dgram_socket = False
+ self.request = ''
+ self.tls = False
+ self.cert = None
+ self.key = None
+ self.__socket_config = None
+ self.__empty_request = "".encode()
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.connect_timeout = configuration.get('connect_timeout', DEFAULT_CONNECT_TIMEOUT)
+ self.read_timeout = configuration.get('read_timeout', DEFAULT_READ_TIMEOUT)
+ self.write_timeout = configuration.get('write_timeout', DEFAULT_WRITE_TIMEOUT)
+
+ def _socket_error(self, message=None):
+ if self.unix_socket is not None:
+ self.error('unix socket "{socket}": {message}'.format(socket=self.unix_socket,
+ message=message))
+ else:
+ if self.__socket_config is not None:
+ _, _, _, _, sa = self.__socket_config
+ self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0],
+ port=sa[1],
+ message=message))
+ else:
+ self.error('unknown socket: {0}'.format(message))
+
+ def _connect2socket(self, res=None):
+ """
+ Connect to a socket, passing the result of getaddrinfo()
+ :return: boolean
+ """
+ if res is None:
+ res = self.__socket_config
+ if res is None:
+ self.error("Cannot create socket to 'None':")
+ return False
+
+ af, sock_type, proto, _, sa = res
+ try:
+ self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self._sock = socket.socket(af, sock_type, proto)
+ except socket.error as error:
+ self.error('Failed to create socket "{address}", port {port}, error: {error}'.format(address=sa[0],
+ port=sa[1],
+ error=error))
+ self._sock = None
+ self.__socket_config = None
+ return False
+
+ if self.tls:
+ try:
+ self.debug('Encapsulating socket with TLS')
+ self.debug('Using keyfile: {0}, certfile: {1}, cert_reqs: {2}, ssl_version: {3}'.format(
+ self.key, self.cert, ssl.CERT_NONE, PROTOCOL_TLS
+ ))
+ self._sock = ssl.wrap_socket(self._sock,
+ keyfile=self.key,
+ certfile=self.cert,
+ server_side=False,
+ cert_reqs=ssl.CERT_NONE,
+ ssl_version=PROTOCOL_TLS,
+ )
+ except (socket.error, ssl.SSLError, IOError, OSError) as error:
+ self.error('failed to wrap socket : {0}'.format(repr(error)))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
+ try:
+ self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self._sock.settimeout(self.connect_timeout)
+ self.debug('set socket connect timeout to: {0}'.format(self._sock.gettimeout()))
+ self._sock.connect(sa)
+ except (socket.error, ssl.SSLError) as error:
+ self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0],
+ port=sa[1],
+ error=error))
+ self._disconnect()
+ self.__socket_config = None
+ return False
+
+ self.debug('connected to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
+ self.__socket_config = res
+ return True
+
+ def _connect2unixsocket(self):
+ """
+ Connect to a unix socket, given its filename
+ :return: boolean
+ """
+ if self.unix_socket is None:
+ self.error("cannot connect to unix socket 'None'")
+ return False
+
+ try:
+ self.debug('attempting DGRAM unix socket "{0}"'.format(self.unix_socket))
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ self._sock.settimeout(self.connect_timeout)
+ self.debug('set socket connect timeout to: {0}'.format(self._sock.gettimeout()))
+ self._sock.connect(self.unix_socket)
+ self.debug('connected DGRAM unix socket "{0}"'.format(self.unix_socket))
+ return True
+ except socket.error as error:
+ self.debug('Failed to connect DGRAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
+ error=error))
+
+ try:
+ self.debug('attempting STREAM unix socket "{0}"'.format(self.unix_socket))
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self._sock.settimeout(self.connect_timeout)
+ self.debug('set socket connect timeout to: {0}'.format(self._sock.gettimeout()))
+ self._sock.connect(self.unix_socket)
+ self.debug('connected STREAM unix socket "{0}"'.format(self.unix_socket))
+ return True
+ except socket.error as error:
+ self.debug('Failed to connect STREAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
+ error=error))
+ self._sock = None
+ return False
+
+ def _connect(self):
+ """
+ Recreate socket and connect to it since sockets cannot be reused after closing
+ Available configurations are IPv6, IPv4 or UNIX socket
+ :return:
+ """
+ try:
+ if self.unix_socket is not None:
+ self._connect2unixsocket()
+
+ else:
+ if self.__socket_config is not None:
+ self._connect2socket()
+ else:
+ if self.dgram_socket:
+ sock_type = socket.SOCK_DGRAM
+ else:
+ sock_type = socket.SOCK_STREAM
+ for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, sock_type):
+ if self._connect2socket(res):
+ break
+
+ except Exception as error:
+ self.error('unhandled exception during connect : {0}'.format(repr(error)))
+ self._sock = None
+ self.__socket_config = None
+
+ def _disconnect(self):
+ """
+ Close socket connection
+ :return:
+ """
+ if self._sock is not None:
+ try:
+ self.debug('closing socket')
+ self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
+ self._sock.close()
+ except Exception as error:
+ if not (hasattr(error, 'errno') and error.errno == errno.ENOTCONN):
+ self.error(error)
+ self._sock = None
+
+ def _send(self, request=None):
+ """
+ Send request.
+ :return: boolean
+ """
+ # Send request if it is needed
+ if self.request != self.__empty_request:
+ try:
+ self.debug('set socket write timeout to: {0}'.format(self._sock.gettimeout()))
+ self._sock.settimeout(self.write_timeout)
+ self.debug('sending request: {0}'.format(request or self.request))
+ self._sock.send(request or self.request)
+ except Exception as error:
+ self._socket_error('error sending request: {0}'.format(error))
+ self._disconnect()
+ return False
+ return True
+
+ def _receive(self, raw=False):
+ """
+ Receive data from socket
+ :param raw: set `True` to return bytes
+ :type raw: bool
+ :return: decoded str or raw bytes
+ :rtype: str/bytes
+ """
+ data = "" if not raw else b""
+ while True:
+ self.debug('receiving response')
+ try:
+ self.debug('set socket read timeout to: {0}'.format(self._sock.gettimeout()))
+ self._sock.settimeout(self.read_timeout)
+ buf = self._sock.recv(4096)
+ except Exception as error:
+ self._socket_error('failed to receive response: {0}'.format(error))
+ self._disconnect()
+ break
+
+ if buf is None or len(buf) == 0: # handle server disconnect
+ if data == "" or data == b"":
+ self._socket_error('unexpectedly disconnected')
+ else:
+ self.debug('server closed the connection')
+ self._disconnect()
+ break
+
+ self.debug('received data')
+ data += buf.decode('utf-8', 'ignore') if not raw else buf
+ if self._check_raw_data(data):
+ break
+
+ self.debug(u'final response: {0}'.format(data if not raw else u'binary data'))
+ return data
+
+ def _get_raw_data(self, raw=False, request=None):
+ """
+ Get raw data with low-level "socket" module.
+ :param raw: set `True` to return bytes
+ :type raw: bool
+ :return: decoded data (str) or raw data (bytes)
+ :rtype: str/bytes
+ """
+ if self._sock is None:
+ self._connect()
+ if self._sock is None:
+ return None
+
+ # Send request if it is needed
+ if not self._send(request):
+ return None
+
+ data = self._receive(raw)
+
+ if not self._keep_alive:
+ self._disconnect()
+
+ return data
+
+ @staticmethod
+ def _check_raw_data(data):
+ """
+ Check if all data has been gathered from socket
+ :param data: str
+ :return: boolean
+ """
+ return bool(data)
+
+ def _parse_config(self):
+ """
+ Parse configuration data
+ :return: boolean
+ """
+ try:
+ self.unix_socket = str(self.configuration['socket'])
+ except (KeyError, TypeError):
+ self.debug('No unix socket specified. Trying TCP/IP socket.')
+ self.unix_socket = None
+ try:
+ self.host = str(self.configuration['host'])
+ except (KeyError, TypeError):
+ self.debug('No host specified. Using: "{0}"'.format(self.host))
+ try:
+ self.port = int(self.configuration['port'])
+ except (KeyError, TypeError):
+ self.debug('No port specified. Using: "{0}"'.format(self.port))
+
+ self.tls = bool(self.configuration.get('tls', self.tls))
+ if self.tls and not _TLS_SUPPORT:
+ self.warning('TLS requested but no TLS module found, disabling TLS support.')
+ self.tls = False
+ if _TLS_SUPPORT and not self.tls:
+ self.debug('No TLS preference specified, not using TLS.')
+
+ if self.tls and _TLS_SUPPORT:
+ self.key = self.configuration.get('tls_key_file')
+ self.cert = self.configuration.get('tls_cert_file')
+ if not self.cert:
+ # If there's not a valid certificate, clear the key too.
+ self.debug('No valid TLS client certificate configuration found.')
+ self.key = None
+ self.cert = None
+ elif not self.key:
+ # If a key isn't listed, the config may still be
+ # valid, because there may be a key attached to the
+ # certificate.
+ self.info('No TLS client key specified, assuming it\'s attached to the certificate.')
+ self.key = None
+
+ try:
+ self.request = str(self.configuration['request'])
+ except (KeyError, TypeError):
+ self.debug('No request specified. Using: "{0}"'.format(self.request))
+
+ self.request = self.request.encode()
+
+ def check(self):
+ self._parse_config()
+ return SimpleService.check(self)
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
new file mode 100644
index 00000000..76129d37
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Pawel Krupa (paulfantom)
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import urllib3
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+try:
+ urllib3.disable_warnings()
+except AttributeError:
+ pass
+
+URLLIB3_VERSION = urllib3.__version__
+URLLIB3 = 'urllib3'
+
+
+class UrlService(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.debug("{0} version: {1}".format(URLLIB3, URLLIB3_VERSION))
+ self.url = self.configuration.get('url')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.proxy_user = self.configuration.get('proxy_user')
+ self.proxy_password = self.configuration.get('proxy_pass')
+ self.proxy_url = self.configuration.get('proxy_url')
+ self.method = self.configuration.get('method', 'GET')
+ self.header = self.configuration.get('header')
+ self.body = self.configuration.get('body')
+ self.request_timeout = self.configuration.get('timeout', 1)
+ self.respect_retry_after_header = self.configuration.get('respect_retry_after_header')
+ self.tls_verify = self.configuration.get('tls_verify')
+ self.tls_ca_file = self.configuration.get('tls_ca_file')
+ self.tls_key_file = self.configuration.get('tls_key_file')
+ self.tls_cert_file = self.configuration.get('tls_cert_file')
+ self._manager = None
+
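+ # Build request and proxy headers, adding basic auth when user/password and proxy_user/proxy_pass are configured.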
+ def __make_headers(self, **header_kw):
+ user = header_kw.get('user') or self.user
+ password = header_kw.get('pass') or self.password
+ proxy_user = header_kw.get('proxy_user') or self.proxy_user
+ proxy_password = header_kw.get('proxy_pass') or self.proxy_password
+ custom_header = header_kw.get('header') or self.header
+ header_params = dict(keep_alive=True)
+ proxy_header_params = dict()
+ if user and password:
+ header_params['basic_auth'] = '{user}:{password}'.format(user=user,
+ password=password)
+ if proxy_user and proxy_password:
+ proxy_header_params['proxy_basic_auth'] = '{user}:{password}'.format(user=proxy_user,
+ password=proxy_password)
+ try:
+ header, proxy_header = urllib3.make_headers(**header_params), urllib3.make_headers(**proxy_header_params)
+ except TypeError as error:
+ self.error('build_header() error: {error}'.format(error=error))
+ return None, None
+ else:
+ header.update(custom_header or dict())
+ return header, proxy_header
+
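+ # Create a urllib3 PoolManager, or a ProxyManager when proxy_url is set, configured with headers and TLS options.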
+ def _build_manager(self, **header_kw):
+ header, proxy_header = self.__make_headers(**header_kw)
+ if header is None or proxy_header is None:
+ return None
+ proxy_url = header_kw.get('proxy_url') or self.proxy_url
+ if proxy_url:
+ manager = urllib3.ProxyManager
+ params = dict(proxy_url=proxy_url, headers=header, proxy_headers=proxy_header)
+ else:
+ manager = urllib3.PoolManager
+ params = dict(headers=header)
+ tls_cert_file = self.tls_cert_file
+ if tls_cert_file:
+ params['cert_file'] = tls_cert_file
+ # NOTE: key_file is useless without cert_file, but
+ # cert_file may include the key as well.
+ tls_key_file = self.tls_key_file
+ if tls_key_file:
+ params['key_file'] = tls_key_file
+ tls_ca_file = self.tls_ca_file
+ if tls_ca_file:
+ params['ca_certs'] = tls_ca_file
+ try:
+ url = header_kw.get('url') or self.url
+ is_https = url.startswith('https')
+ if skip_tls_verify(is_https, self.tls_verify, tls_ca_file):
+ params['ca_certs'] = None
+ params['cert_reqs'] = 'CERT_NONE'
+ if is_https:
+ params['assert_hostname'] = False
+ return manager(**params)
+ except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error:
+ self.error('build_manager() error:', str(error))
+ return None
+
+ def _get_raw_data(self, url=None, manager=None, **kwargs):
+ """
+ Get raw data from http request
+ :return: str
+ """
+ try:
+ response = self._do_request(url, manager, **kwargs)
+ except Exception as error:
+ self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error))
+ return None
+
+ if response.status == 200:
+ if isinstance(response.data, str):
+ return response.data
+ return response.data.decode(errors='ignore')
+ else:
+ self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=response.status))
+ return None
+
+ def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
+ """
+ Get status and response body content from http request. Does not catch exceptions
+ :return: int, str
+ """
+ response = self._do_request(url, manager, retries, redirect, **kwargs)
+
+ if isinstance(response.data, str):
+ return response.status, response.data
+ return response.status, response.data.decode(errors='ignore')
+
+ def _do_request(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
+ """
+ Get response from http request. Does not catch exceptions
+ :return: HTTPResponse
+ """
+ url = url or self.url
+ manager = manager or self._manager
+ retry = urllib3.Retry(retries)
+ if hasattr(retry, 'respect_retry_after_header'):
+ retry.respect_retry_after_header = bool(self.respect_retry_after_header)
+
+ if self.body:
+ kwargs['body'] = self.body
+
+ response = manager.request(
+ method=self.method,
+ url=url,
+ timeout=self.request_timeout,
+ retries=retry,
+ headers=manager.headers,
+ redirect=redirect,
+ **kwargs
+ )
+ return response
+
+ def check(self):
+ """
+ Format configuration data and try to connect to server
+ :return: boolean
+ """
+ if not (self.url and isinstance(self.url, str)):
+ self.error('URL is not defined or type is not <str>')
+ return False
+
+ self._manager = self._build_manager()
+ if not self._manager:
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Url: {url}. Error: {error}'.format(url=self.url, error=error))
+ return False
+
+ if isinstance(data, dict) and data:
+ return True
+ self.error('_get_data() returned no data or type is not <dict>')
+ return False
+
+
+def skip_tls_verify(is_https, tls_verify, tls_ca_file):
+ # default 'tls_verify' value is None
+ # logic is:
+ # - never skip if there is 'tls_ca_file' file
+ # - skip by default for https
+ # - do not skip by default for http
+ if tls_ca_file:
+ return False
+ if is_https and not tls_verify:
+ return True
+ return tls_verify is False
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
new file mode 100644
index 00000000..203ad167
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
@@ -0,0 +1,431 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from bases.collection import safe_print
+
+CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden']
+DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
+VARIABLE_PARAMS = ['id', 'value']
+
+CHART_TYPES = ['line', 'area', 'stacked']
+DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row']
+
+CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
+CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
+ "{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
+CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
+ "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
+
+CLABEL_COLLECT_JOB = "CLABEL '_collect_job' '{actual_job_name}' '0'\n"
+CLABEL_COMMIT = "CLABEL_COMMIT\n"
+
+DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden} {obsolete}'\n"
+DIMENSION_SET = "SET '{id}' = {value}\n"
+
+CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n"
+
+# 1 is label source auto
+# https://github.com/netdata/netdata/blob/cc2586de697702f86a3c34e60e23652dd4ddcb42/database/rrd.h#L205
+RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time' 'ms' 'python.d' " \
+ "netdata.pythond_runtime line 145000 {update_every} '' 'python.d.plugin' '{module_name}'\n" \
+ "CLABEL '_collect_job' '{actual_job_name}' '1'\n" \
+ "CLABEL_COMMIT\n" \
+ "DIMENSION run_time 'run time' absolute 1 1\n"
+
+ND_INTERNAL_MONITORING_DISABLED = os.getenv("NETDATA_INTERNALS_MONITORING") == "NO"
+
+
+def create_runtime_chart(func):
+ """
+ Calls a wrapped function, then prints runtime chart to stdout.
+
+ Used as a decorator for SimpleService.create() method.
+ The whole point of implementing the 'create runtime chart' functionality as a decorator was
+ to help users who re-implement create() in their own classes.
+
+ :param func: class method
+ :return:
+ """
+
+ def wrapper(*args, **kwargs):
+ self = args[0]
+ if not ND_INTERNAL_MONITORING_DISABLED:
+ chart = RUNTIME_CHART_CREATE.format(
+ job_name=self.name,
+ actual_job_name=self.actual_job_name,
+ update_every=self._runtime_counters.update_every,
+ module_name=self.module_name,
+ )
+ safe_print(chart)
+ ok = func(*args, **kwargs)
+ return ok
+
+ return wrapper
+
+
+class ChartError(Exception):
+ """Base-class for all exceptions raised by this module"""
+
+
+class DuplicateItemError(ChartError):
+ """Occurs when user re-adds a chart or a dimension that has already been added"""
+
+
+class ItemTypeError(ChartError):
+ """Occurs when user passes value of wrong type to Chart, Dimension or ChartVariable class"""
+
+
+class ItemValueError(ChartError):
+ """Occurs when user passes inappropriate value to Chart, Dimension or ChartVariable class"""
+
+
+class Charts:
+ """Represent a collection of charts
+
+ All charts are stored in a dict.
+ Each chart is an instance of the Chart class.
+ Charts must be added using the Charts.add_chart() method only"""
+
+ def __init__(self, job_name, actual_job_name, priority, cleanup, get_update_every, module_name):
+ """
+ :param job_name: <bound method>
+ :param actual_job_name: <str>
+ :param priority: <int>
+ :param cleanup: <int>
+ :param get_update_every: <bound method>
+ :param module_name: <str>
+ """
+ self.job_name = job_name
+ self.actual_job_name = actual_job_name
+ self.priority = priority
+ self.cleanup = cleanup
+ self.get_update_every = get_update_every
+ self.module_name = module_name
+ self.charts = dict()
+
+ def __len__(self):
+ return len(self.charts)
+
+ def __iter__(self):
+ return iter(self.charts.values())
+
+ def __repr__(self):
+ return 'Charts({0})'.format(self)
+
+ def __str__(self):
+ return str([chart for chart in self.charts])
+
+ def __contains__(self, item):
+ return item in self.charts
+
+ def __getitem__(self, item):
+ return self.charts[item]
+
+ def __delitem__(self, key):
+ del self.charts[key]
+
+ def __bool__(self):
+ return bool(self.charts)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def add_chart(self, params):
+ """
+        Create a Chart instance and add it to the dict.
+
+        The job name is prepended to params; priority, update_every, module_name
+        and actual_job_name are added to the new chart's params.
+ :param params: <list>
+ :return:
+ """
+ params = [self.job_name()] + params
+ new_chart = Chart(params)
+
+ new_chart.params['update_every'] = self.get_update_every()
+ new_chart.params['priority'] = self.priority
+ new_chart.params['module_name'] = self.module_name
+ new_chart.params['actual_job_name'] = self.actual_job_name
+
+ self.priority += 1
+ self.charts[new_chart.id] = new_chart
+
+ return new_chart
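+
+    # Illustrative input (not from this file), as seen from a module where
+    # 'charts' is this Charts instance; the list mirrors CHART_PARAMS without
+    # 'type', which add_chart() prepends via job_name():
+    #
+    #     charts.add_chart([
+    #         'requests',          # id
+    #         None,                # name
+    #         'Requests',          # title
+    #         'requests/s',        # units
+    #         'requests',          # family
+    #         'example.requests',  # context
+    #         'line',              # chart_type
+    #     ])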
+
+ def active_charts(self):
+ return [chart.id for chart in self if not chart.flags.obsoleted]
+
+
+class Chart:
+ """Represent a chart"""
+
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'chart' must be a list type")
+ if not len(params) >= 8:
+ raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS))
+
+ self.params = dict(zip(CHART_PARAMS, (p or str() for p in params)))
+ self.name = '{type}.{id}'.format(type=self.params['type'],
+ id=self.params['id'])
+ if self.params.get('chart_type') not in CHART_TYPES:
+ self.params['chart_type'] = 'absolute'
+ hidden = str(self.params.get('hidden', ''))
+ self.params['hidden'] = 'hidden' if hidden == 'hidden' else ''
+
+ self.dimensions = list()
+ self.variables = set()
+ self.flags = ChartFlags()
+ self.penalty = 0
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __repr__(self):
+ return 'Chart({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __iter__(self):
+ return iter(self.dimensions)
+
+ def __contains__(self, item):
+ return item in [dimension.id for dimension in self.dimensions]
+
+ def add_variable(self, variable):
+ """
+ :param variable: <list>
+ :return:
+ """
+ self.variables.add(ChartVariable(variable))
+
+ def add_dimension(self, dimension):
+ """
+ :param dimension: <list>
+ :return:
+ """
+ dim = Dimension(dimension)
+
+ if dim.id in self:
+ raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id,
+ chart=self.name))
+ self.refresh()
+ self.dimensions.append(dim)
+ return dim
+
+ def del_dimension(self, dimension_id, hide=True):
+ if dimension_id not in self:
+ return
+ idx = self.dimensions.index(dimension_id)
+ dimension = self.dimensions[idx]
+ if hide:
+ dimension.params['hidden'] = 'hidden'
+ dimension.params['obsolete'] = 'obsolete'
+ self.create()
+ self.dimensions.remove(dimension)
+
+ def hide_dimension(self, dimension_id, reverse=False):
+ if dimension_id not in self:
+ return
+ idx = self.dimensions.index(dimension_id)
+ dimension = self.dimensions[idx]
+ dimension.params['hidden'] = 'hidden' if not reverse else str()
+ self.refresh()
+
+ def create(self):
+ """
+ :return:
+ """
+ chart = CHART_CREATE.format(**self.params)
+ labels = CLABEL_COLLECT_JOB.format(**self.params) + CLABEL_COMMIT
+ dimensions = ''.join([dimension.create() for dimension in self.dimensions])
+ variables = ''.join([var.set(var.value) for var in self.variables if var])
+
+ self.flags.push = False
+ self.flags.created = True
+
+ safe_print(chart + labels + dimensions + variables)
+
+ def can_be_updated(self, data):
+ for dim in self.dimensions:
+ if dim.get_value(data) is not None:
+ return True
+ return False
+
+ def update(self, data, interval):
+ updated_dimensions, updated_variables = str(), str()
+
+ for dim in self.dimensions:
+ value = dim.get_value(data)
+ if value is not None:
+ updated_dimensions += dim.set(value)
+
+ for var in self.variables:
+ value = var.get_value(data)
+ if value is not None:
+ updated_variables += var.set(value)
+
+ if updated_dimensions:
+ since_last = interval if self.flags.updated else 0
+
+ if self.flags.push:
+ self.create()
+
+ chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last)
+ safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n')
+
+ self.flags.updated = True
+ self.penalty = 0
+ else:
+ self.penalty += 1
+ self.flags.updated = False
+
+ return bool(updated_dimensions)
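+
+    # For reference, one successful update of chart 'example.requests' with a
+    # single dimension 'requests' collected as 42 prints (values illustrative):
+    #
+    #     BEGIN example.requests <since_last>
+    #     SET 'requests' = 42
+    #     END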
+
+ def obsolete(self):
+ self.flags.obsoleted = True
+ if self.flags.created:
+ safe_print(CHART_OBSOLETE.format(**self.params))
+
+ def refresh(self):
+ self.penalty = 0
+ self.flags.push = True
+ self.flags.obsoleted = False
+
+
+class Dimension:
+ """Represent a dimension"""
+
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'dimension' must be a list type")
+ if not params:
+ raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS))
+
+ self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params)))
+ self.params['name'] = self.params.get('name') or self.params['id']
+
+ if self.params.get('algorithm') not in DIMENSION_ALGORITHMS:
+ self.params['algorithm'] = 'absolute'
+ if not isinstance(self.params.get('multiplier'), int):
+ self.params['multiplier'] = 1
+ if not isinstance(self.params.get('divisor'), int):
+ self.params['divisor'] = 1
+ self.params.setdefault('hidden', '')
+ self.params.setdefault('obsolete', '')
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __repr__(self):
+ return 'Dimension({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __eq__(self, other):
+ if not isinstance(other, Dimension):
+ return self.id == other
+ return self.id == other.id
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def create(self):
+ return DIMENSION_CREATE.format(**self.params)
+
+ def set(self, value):
+ """
+        :param value: <int> (or a numeric string)
+        :return: <str> formatted SET line
+ """
+ return DIMENSION_SET.format(id=self.id,
+ value=value)
+
+ def get_value(self, data):
+ try:
+ return int(data[self.id])
+ except (KeyError, TypeError):
+ return None
+
+
+class ChartVariable:
+ """Represent a chart variable"""
+
+ def __init__(self, params):
+ """
+ :param params: <list>
+ """
+ if not isinstance(params, list):
+ raise ItemTypeError("'variable' must be a list type")
+ if not params:
+            raise ItemValueError("invalid value for 'variable', must be {0}".format(VARIABLE_PARAMS))
+
+ self.params = dict(zip(VARIABLE_PARAMS, params))
+ self.params.setdefault('value', None)
+
+ def __getattr__(self, item):
+ try:
+ return self.params[item]
+ except KeyError:
+ raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
+ attr=item))
+
+ def __bool__(self):
+ return self.value is not None
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def __repr__(self):
+ return 'ChartVariable({0})'.format(self.id)
+
+ def __str__(self):
+ return self.id
+
+ def __eq__(self, other):
+ if isinstance(other, ChartVariable):
+ return self.id == other.id
+ return False
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def set(self, value):
+ return CHART_VARIABLE_SET.format(id=self.id,
+ value=value)
+
+ def get_value(self, data):
+ try:
+ return int(data[self.id])
+ except (KeyError, TypeError):
+ return None
+
+
+class ChartFlags:
+ def __init__(self):
+ self.push = True
+ self.created = False
+ self.updated = False
+ self.obsoleted = False
diff --git a/collectors/python.d.plugin/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
new file mode 100644
index 00000000..93bf8cf0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/collection.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+
+from threading import Lock
+
+PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':')
+
+CHART_BEGIN = 'BEGIN {0} {1}\n'
+CHART_CREATE = "CHART {0} '{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n"
+DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n"
+DIMENSION_SET = "SET '{0}' = {1}\n"
+
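+# Serializes writes to stdout: chart protocol lines from concurrently running
+# jobs go through safe_print(), and interleaved output would corrupt the
+# stream the agent parses.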
+print_lock = Lock()
+
+
+def setdefault_values(config, base_dict):
+ for key, value in base_dict.items():
+ config.setdefault(key, value)
+ return config
+
+
+def run_and_exit(func):
+ def wrapper(*args, **kwargs):
+ func(*args, **kwargs)
+ exit(1)
+
+ return wrapper
+
+
+def on_try_except_finally(on_except=(None,), on_finally=(None,)):
+ except_func = on_except[0]
+ finally_func = on_finally[0]
+
+ def decorator(func):
+ def wrapper(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except Exception:
+ if except_func:
+ except_func(*on_except[1:])
+ finally:
+ if finally_func:
+ finally_func(*on_finally[1:])
+
+ return wrapper
+
+ return decorator
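+
+# Used below and in bases/loggers.py: safe_print() is wrapped with
+# on_except=(exit, 1), so the plugin exits if writing to stdout fails, while
+# BaseLogger.fatal() is wrapped with on_finally=(exit, 1), so it always exits
+# after logging.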
+
+
+def static_vars(**kwargs):
+ def decorate(func):
+ for k in kwargs:
+ setattr(func, k, kwargs[k])
+ return func
+
+ return decorate
+
+
+@on_try_except_finally(on_except=(exit, 1))
+def safe_print(*msg):
+ """
+ :param msg:
+ :return:
+ """
+ print_lock.acquire()
+ print(''.join(msg))
+ print_lock.release()
+
+
+def find_binary(binary):
+ """
+ :param binary: <str>
+ :return:
+ """
+ for directory in PATH:
+ binary_name = os.path.join(directory, binary)
+ if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK):
+ return binary_name
+ return None
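+
+# Illustrative usage (not from this file): modules typically locate external
+# tools before spawning them, e.g.
+#
+#     sudo = find_binary('sudo')
+#     if sudo is None:
+#         pass  # not in PATH; the module cannot collect and should bail out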
+
+
+def read_last_line(f):
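+    """
+    Return the last line of a file, decoded to str.
+
+    Seeks near the end and walks backwards until a newline (or the start of
+    the file) is found, then reads the remaining line.
+    :param f: <str> path to the file
+    """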
+ with open(f, 'rb') as opened:
+ opened.seek(-2, 2)
+ while opened.read(1) != b'\n':
+ opened.seek(-2, 1)
+ if opened.tell() == 0:
+ break
+ result = opened.readline()
+ return result.decode()
+
+
+def unicode_str(arg):
+ """Return the argument as a unicode string.
+
+ The `unicode` function has been removed from Python3 and `str` takes its
+ place. This function is a helper which will try using Python 2's `unicode`
+ and if it doesn't exist, assume we're using Python 3 and use `str`.
+
+ :param arg:
+ :return: <str>
+ """
+ # TODO: fix
+ try:
+ # https://github.com/netdata/netdata/issues/7613
+ if isinstance(arg, unicode):
+ return arg
+ return unicode(arg, errors='ignore')
+ # https://github.com/netdata/netdata/issues/7642
+ except TypeError:
+ return unicode(arg)
+ except NameError:
+ return str(arg)
diff --git a/collectors/python.d.plugin/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py
new file mode 100644
index 00000000..095f3a3b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/loaders.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from sys import version_info
+
+PY_VERSION = version_info[:2]
+
+try:
+ if PY_VERSION > (3, 1):
+ from pyyaml3 import SafeLoader as YamlSafeLoader
+ else:
+ from pyyaml2 import SafeLoader as YamlSafeLoader
+except ImportError:
+ from yaml import SafeLoader as YamlSafeLoader
+
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+
+DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' if PY_VERSION > (3, 1) else u'tag:yaml.org,2002:map'
+
+
+def dict_constructor(loader, node):
+ return OrderedDict(loader.construct_pairs(node))
+
+
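+# Build plain YAML mappings as OrderedDict so configuration keys (e.g. job
+# definitions) keep the order in which they were written.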
+YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor)
+
+
+def load_yaml(stream):
+ loader = YamlSafeLoader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+
+def load_config(file_name):
+ with open(file_name, 'r') as stream:
+ return load_yaml(stream)
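+
+
+# Example usage (the path and default below are illustrative only):
+#
+#     conf = load_config('/etc/netdata/python.d/example.conf')
+#     update_every = conf.get('update_every', 5)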
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
new file mode 100644
index 00000000..7ae8ab0c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+# Description:
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import logging
+import os
+import stat
+import traceback
+
+from sys import exc_info
+
+try:
+ from time import monotonic as time
+except ImportError:
+ from time import time
+
+from bases.collection import on_try_except_finally, unicode_str
+
+LOGGING_LEVELS = {
+ 'CRITICAL': 50,
+ 'ERROR': 40,
+ 'WARNING': 30,
+ 'INFO': 20,
+ 'DEBUG': 10,
+ 'NOTSET': 0,
+}
+
+
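+# systemd sets JOURNAL_STREAM to "<device>:<inode>" of the stream it attached
+# to the service (see systemd.exec(5)); if stderr's own device and inode match,
+# output goes straight to the journal and timestamps can be dropped from the
+# log format below.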
+def is_stderr_connected_to_journal():
+ journal_stream = os.environ.get("JOURNAL_STREAM")
+ if not journal_stream:
+ return False
+
+ colon_index = journal_stream.find(":")
+ if colon_index <= 0:
+ return False
+
+ device, inode = journal_stream[:colon_index], journal_stream[colon_index + 1:]
+
+ try:
+ device_number, inode_number = os.fstat(2)[stat.ST_DEV], os.fstat(2)[stat.ST_INO]
+ except OSError:
+ return False
+
+ return str(device_number) == device and str(inode_number) == inode
+
+
+is_journal = is_stderr_connected_to_journal()
+
+DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s'
+PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s'
+
+if is_journal:
+ DEFAULT_LOG_LINE_FORMAT = '%(name)s %(levelname)s : %(message)s'
+    PYTHON_D_LOG_LINE_FORMAT = '%(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s'
+
+DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
+PYTHON_D_LOG_NAME = 'python.d'
+
+
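+# Appends the formatted traceback as a second log message when log_traceback
+# is enabled and an exception is currently being handled.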
+def add_traceback(func):
+ def on_call(*args):
+ self = args[0]
+
+ if not self.log_traceback:
+ func(*args)
+ else:
+ if exc_info()[0]:
+ func(*args)
+ func(self, traceback.format_exc())
+ else:
+ func(*args)
+
+ return on_call
+
+
+class BaseLogger(object):
+ def __init__(
+ self,
+ logger_name,
+ log_fmt=DEFAULT_LOG_LINE_FORMAT,
+ date_fmt=DEFAULT_LOG_TIME_FORMAT,
+ handler=logging.StreamHandler,
+ ):
+ self.logger = logging.getLogger(logger_name)
+ self._muted = False
+ if not self.has_handlers():
+ self.severity = 'INFO'
+ self.logger.addHandler(handler())
+ self.set_formatter(fmt=log_fmt, date_fmt=date_fmt)
+
+ def __repr__(self):
+        return '<Logger: {name}>'.format(name=self.logger.name)
+
+ def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT):
+ if self.has_handlers():
+ self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt))
+
+ def has_handlers(self):
+ return self.logger.handlers
+
+ @property
+ def severity(self):
+ return self.logger.getEffectiveLevel()
+
+ @severity.setter
+ def severity(self, level):
+ if level in LOGGING_LEVELS:
+ self.logger.setLevel(LOGGING_LEVELS[level])
+
+ def _log(self, level, *msg, **kwargs):
+ if not self._muted:
+ self.logger.log(level, ' '.join(map(unicode_str, msg)), **kwargs)
+
+ def debug(self, *msg, **kwargs):
+ self._log(logging.DEBUG, *msg, **kwargs)
+
+ def info(self, *msg, **kwargs):
+ self._log(logging.INFO, *msg, **kwargs)
+
+ def warning(self, *msg, **kwargs):
+ self._log(logging.WARN, *msg, **kwargs)
+
+ def error(self, *msg, **kwargs):
+ self._log(logging.ERROR, *msg, **kwargs)
+
+ def alert(self, *msg, **kwargs):
+ self._log(logging.CRITICAL, *msg, **kwargs)
+
+ @on_try_except_finally(on_finally=(exit, 1))
+ def fatal(self, *msg, **kwargs):
+ self._log(logging.CRITICAL, *msg, **kwargs)
+
+ def mute(self):
+ self._muted = True
+
+ def unmute(self):
+ self._muted = False
+
+
+class PythonDLogger(object):
+ def __init__(
+ self,
+ logger_name=PYTHON_D_LOG_NAME,
+ log_fmt=PYTHON_D_LOG_LINE_FORMAT,
+ ):
+ self.logger = BaseLogger(logger_name, log_fmt=log_fmt)
+ self.module_name = 'plugin'
+ self.job_name = 'main'
+
+ _LOG_TRACEBACK = False
+
+ @property
+ def log_traceback(self):
+ return PythonDLogger._LOG_TRACEBACK
+
+ @log_traceback.setter
+ def log_traceback(self, value):
+ PythonDLogger._LOG_TRACEBACK = value
+
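+    # Each method below passes module_name/job_name via 'extra', filling the
+    # %(module_name)s[%(job_name)s] placeholders in PYTHON_D_LOG_LINE_FORMAT.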
+ def debug(self, *msg):
+ self.logger.debug(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
+
+ def info(self, *msg):
+ self.logger.info(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
+
+ def warning(self, *msg):
+ self.logger.warning(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
+
+ @add_traceback
+ def error(self, *msg):
+ self.logger.error(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
+
+ @add_traceback
+ def alert(self, *msg):
+ self.logger.alert(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
+
+ def fatal(self, *msg):
+ self.logger.fatal(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
new file mode 100644
index 00000000..4d560e43
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
@@ -0,0 +1,316 @@
+# SPDX-License-Identifier: MIT
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+__version__ = '3.11'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
new file mode 100644
index 00000000..6b41b806
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+        # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+                raise ComposerError("found duplicate anchor %r; first occurrence"
+                        % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+                        "second occurrence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
new file mode 100644
index 00000000..8ad1b90a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
@@ -0,0 +1,676 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+        # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+                setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
new file mode 100644
index 00000000..2858ab47
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
new file mode 100644
index 00000000..3685cbee
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
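BaseDumper, SafeDumper and Dumper differ only in the representer they mix in: SafeDumper sticks to standard tags, while Dumper also represents python-specific ones. In practice they are reached through yaml.dump() rather than instantiated directly; a minimal sketch, assuming the vendored package is importable under the usual yaml name:

    import sys
    import yaml

    job = {'name': 'local', 'update_every': 1}

    # The keyword arguments map straight onto the SafeDumper constructor above.
    yaml.dump(job, sys.stdout, Dumper=yaml.SafeDumper,
              default_flow_style=False, explicit_start=True)
    # ---
    # name: local
    # update_every: 1
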
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
new file mode 100644
index 00000000..9a460a0f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
@@ -0,0 +1,1141 @@
+# SPDX-License-Identifier: MIT
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
+
+from error import YAMLError
+from events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+        # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+    # In some cases, we wait for the next few events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+                # Some indicators are also forbidden within a scalar.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
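The emitter is fed one event at a time through emit(), and need_more_events() buffers just enough lookahead (one event after DOCUMENT-START, two after SEQUENCE-START, three after MAPPING-START) to pick flow or block style before anything is written. A sketch of driving it with a hand-built event stream through the stock yaml.emit() helper (event constructors as in events.py further down; with no stream argument the helper returns the produced string):

    import yaml

    events = [
        yaml.StreamStartEvent(),
        yaml.DocumentStartEvent(explicit=True),
        yaml.MappingStartEvent(anchor=None, tag=None, implicit=True),
        yaml.ScalarEvent(None, None, (True, True), u'mode'),
        yaml.ScalarEvent(None, None, (True, True), u'auto'),
        yaml.MappingEndEvent(),
        yaml.DocumentEndEvent(explicit=True),
        yaml.StreamEndEvent(),
    ]

    print(yaml.emit(events))
    # ---
    # mode: auto
    # ...
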
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
new file mode 100644
index 00000000..5466be72
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
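Mark records a position in the input buffer and get_snippet() renders the offending line with a caret underneath; MarkedYAMLError.__str__() then stitches the context and problem marks into the familiar two-part message. What that surfaces when a malformed config is loaded (standard PyYAML behaviour, which this vendored copy mirrors):

    import yaml

    try:
        yaml.safe_load(u"chart:\n  dimensions: [user, system\n")
    except yaml.MarkedYAMLError as err:
        # str(err) combines err.problem, err.problem_mark and the caret snippet.
        print(err)
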
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
new file mode 100644
index 00000000..283452ad
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: MIT
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
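These events are the currency exchanged between the parsing side and the emitter: the parser produces them, the composer consumes them, and Emitter.emit() plays them back out. The low-level yaml.parse() helper from stock PyYAML yields them directly, which makes the event grammar quoted at the top of emitter.py easy to observe (output shown roughly, as printed under Python 2):

    import yaml

    for event in yaml.parse(u"modules:\n  - apache\n  - nginx\n"):
        print(event)
    # StreamStartEvent()
    # DocumentStartEvent()
    # MappingStartEvent(anchor=None, tag=None, implicit=True)
    # ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'modules')
    # SequenceStartEvent(anchor=None, tag=None, implicit=True)
    # ...
    # SequenceEndEvent()
    # MappingEndEvent()
    # DocumentEndEvent()
    # StreamEndEvent()
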
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
new file mode 100644
index 00000000..1c195531
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
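Each loader stacks the full pipeline (Reader, Scanner, Parser, Composer, Constructor, Resolver); the safe variant merely swaps in SafeConstructor so arbitrary Python objects cannot be instantiated from tags. The usual entry point:

    import yaml

    text = u"update_every: 5\npriority: 90000\n"

    # SafeLoader constructs only standard tags; Loader would additionally
    # honour python-specific tags such as !!python/object.
    config = yaml.load(text, Loader=yaml.SafeLoader)  # same as yaml.safe_load(text)
    assert config == {'update_every': 5, 'priority': 90000}
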
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
new file mode 100644
index 00000000..ed2a1b43
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: MIT
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
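Nodes are the intermediate representation: the composer builds them from events, the constructor turns them into native values, and on the way out the representer produces them for the serializer. The stock yaml.compose() helper stops at this layer, which is handy for inspecting tags before construction:

    import yaml

    node = yaml.compose(u"modules: [apache, nginx]")
    print(node.tag)            # tag:yaml.org,2002:map
    key_node, value_node = node.value[0]
    print(key_node.value)      # modules
    print(value_node.id)       # sequence
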
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
new file mode 100644
index 00000000..97ba0833
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
@@ -0,0 +1,590 @@
+# SPDX-License-Identifier: MIT
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
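+ # For example, with the usual top-level load() API the flow sequence
+ # `[a: 1, b]` loads as [{'a': 1}, 'b']: the `a: 1` entry becomes a
+ # one-pair mapping inside the sequence.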
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
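The parser above walks the grammar productions quoted in its comments and emits a flat
stream of events (MappingStart/End, SequenceStart/End, Scalar, ...). As a rough
illustration of that event stream, here is a minimal sketch using the top-level
parse() entry point that upstream PyYAML exposes; the exact import path used by
python.d.plugin's bundled copy may differ, so treat `import yaml` as an assumption.

import yaml  # assumption: a PyYAML-compatible package on sys.path

document = u"server:\n  host: 127.0.0.1\n  ports: [19999, 8125]\n"

for event in yaml.parse(document):
    # Prints StreamStartEvent, DocumentStartEvent, MappingStartEvent,
    # ScalarEvent(value=u'server'), ... in the order the grammar dictates.
    print(event)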
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
new file mode 100644
index 00000000..8d422954
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
@@ -0,0 +1,191 @@
+# SPDX-License-Identifier: MIT
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position forward by `length` characters.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
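The Reader above is the lowest layer: it detects the encoding, checks for
non-printable characters, and exposes peek()/prefix()/forward()/get_mark() to the
scanner. A minimal sketch of driving it directly (Python 2, to match this pyyaml2
copy); the `pyyaml2.reader` import path is an assumption about how the bundled
module is laid out on sys.path.

from pyyaml2.reader import Reader  # assumed import path for the vendored copy

r = Reader(u"key: value\n")
print(r.peek())      # 'k'  - current character, nothing is consumed
print(r.prefix(4))   # 'key:' - look ahead without moving the pointer
r.forward(5)         # consume 'key: ' and update index/line/column
print(r.peek())      # 'v'
print(r.get_mark())  # Mark pointing at line 1, column 6 - used in error messages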
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
new file mode 100644
index 00000000..0a1404ec
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
@@ -0,0 +1,485 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import sys, copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(*args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
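The two representer classes above differ mainly in how far they go to preserve
Python types: SafeRepresenter sticks to plain YAML tags, while Representer adds
!!python/... tags for tuples, longs, names, modules and arbitrary objects. A
minimal sketch of the difference, again assuming a PyYAML-compatible `yaml`
package is importable; output is shown approximately.

import yaml

data = {'name': 'netdata', 'ports': (19999, 8125)}

print(yaml.safe_dump(data, default_flow_style=False))
# name: netdata
# ports:        <- SafeRepresenter serializes the tuple as a plain !!seq
# - 19999
# - 8125

print(yaml.dump(data, default_flow_style=False))
# name: netdata
# ports: !!python/tuple   <- Representer keeps the Python-specific tag
# - 19999
# - 8125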
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
new file mode 100644
index 00000000..49922deb
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. Path elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
+
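The resolver is what gives plain scalars like `true`, `42` or `3.14` their
implicit !!bool/!!int/!!float tags: add_implicit_resolver() indexes a regexp by
the characters a value may start with, and resolve() falls back to !!str when
nothing matches. A minimal sketch, assuming the usual upstream import paths:

from yaml.resolver import Resolver   # assumed import path
from yaml.nodes import ScalarNode

resolver = Resolver()
for value in (u'true', u'42', u'3.14', u'null', u'hello'):
    # implicit=(True, False) means "plain scalar, resolve it implicitly"
    print("%s -> %s" % (value, resolver.resolve(ScalarNode, value, (True, False))))
# true  -> tag:yaml.org,2002:bool
# 42    -> tag:yaml.org,2002:int
# 3.14  -> tag:yaml.org,2002:float
# null  -> tag:yaml.org,2002:null
# hello -> tag:yaml.org,2002:str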
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
new file mode 100644
index 00000000..971da612
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
@@ -0,0 +1,1458 @@
+# SPDX-License-Identifier: MIT
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
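+# For example, assuming the usual top-level scan() entry point, the document
+# "a: [1, 2]" produces roughly this token stream:
+#
+#     for token in yaml.scan(u"a: [1, 2]"):
+#         print(token)
+#
+#     StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken(u'a'),
+#     ValueToken, FlowSequenceStartToken, ScalarToken(u'1'), FlowEntryToken,
+#     ScalarToken(u'2'), FlowSequenceEndToken, BlockEndToken, StreamEndToken
+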
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Examples of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Did we find a possible simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+        # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
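+    # Editorial example (illustrative, not from upstream PyYAML), assuming the
+    # folding behaviour of scan_block_scalar() above:
+    #   folded  ('>'):  "a\nb\n"  is scanned as  "a b\n"
+    #   literal ('|'):  "a\nb\n"  is kept as     "a\nb\n"
+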
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+        # Note that we loosen the indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark their beginning and end. Therefore we are less restrictive than
+        # the specification requires. We only need to check that document
+        # separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
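+
+    # Editorial note (illustrative, not from upstream PyYAML): in a
+    # double-quoted scalar, '\n' maps to a line feed via ESCAPE_REPLACEMENTS,
+    # while '\x41', '\u263A' and '\U0001F600' consume 2, 4 and 8 hexadecimal
+    # digits respectively, as declared in ESCAPE_CODES (e.g. '\x41' -> 'A').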
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                    "expected escape sequence of %d hexadecimal digits, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in u',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == u':'
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+                        "expected URI escape sequence of 2 hexadecimal digits, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+        #   '\u2029'    :   '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
new file mode 100644
index 00000000..15fdbb0c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
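+
+    # Editorial note (illustrative, not from upstream PyYAML): anchors are
+    # generated on demand as 'id001', 'id002', ... for nodes referenced more
+    # than once, so the first occurrence serializes as '&id001 ...' and every
+    # later reference emits the alias '*id001'.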
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
new file mode 100644
index 00000000..c5c4fb11
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: MIT
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
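+
+# Editorial example (illustrative, not from upstream PyYAML): repr() lists the
+# non-mark attributes in alphabetical order, e.g. a ScalarToken prints roughly
+# as ScalarToken(plain=True, style=None, value=u'foo').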
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
new file mode 100644
index 00000000..a884b33c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
@@ -0,0 +1,313 @@
+# SPDX-License-Identifier: MIT
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '3.11'
+try:
+ from .cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+import io
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
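+
+# Example (editorial sketch, not part of the vendored module; how the package
+# is imported depends on the caller):
+#     data = safe_load("a: 1\nitems: [2, 3]\n")
+#     # data == {'a': 1, 'items': [2, 3]}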
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ stream = io.StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
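+
+# Example (editorial sketch, not part of the vendored module):
+#     text = safe_dump({'a': 1, 'items': [2, 3]})
+#     # text is roughly "a: 1\nitems: [2, 3]\n"; the exact layout depends on
+#     # the default flow style and other emitter options.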
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
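+
+# Example (editorial sketch; the '!celsius' tag and pattern are hypothetical):
+#     import re
+#     add_implicit_resolver('!celsius', re.compile(r'^-?\d+C$'),
+#                           list('-0123456789'))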
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
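+
+# Example (editorial sketch; the '!point' tag and helper are hypothetical):
+#     def construct_point(loader, node):
+#         x, y = loader.construct_sequence(node)
+#         return (x, y)
+#     add_constructor('!point', construct_point)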
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+    Add a multi-representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
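+
+# Example (editorial sketch; the Monster class is hypothetical): a subclass
+# only needs to set yaml_tag; the metaclass registers the constructor and
+# representer automatically:
+#     class Monster(YAMLObject):
+#         yaml_tag = '!Monster'
+#         def __init__(self, name, hp):
+#             self.name = name
+#             self.hp = hp
+#     # load('--- !Monster {name: Dragon, hp: 50}') returns a Monster instance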
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
new file mode 100644
index 00000000..c418bba9
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer:
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+        # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor, event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+                raise ComposerError("found duplicate anchor %r; first occurrence"
+                        % anchor, self.anchors[anchor].start_mark,
+                        "second occurrence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
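+
+    # Editorial note (illustrative, not from upstream PyYAML): given
+    #     base: &b {x: 1}
+    #     copy: *b
+    # compose_node() stores the node composed for anchor 'b' and returns the
+    # very same node object again when the '*b' alias event is seen.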
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
new file mode 100644
index 00000000..ee09a7a7
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
@@ -0,0 +1,687 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from .error import *
+from .nodes import *
+
+import collections.abc, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor:
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+        # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+            if not isinstance(key, collections.abc.Hashable):
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unhashable key", key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return super().construct_scalar(node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
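+
+    # Editorial note (illustrative, not from upstream PyYAML): flatten_mapping()
+    # implements YAML merge keys, so
+    #     defaults: &d {a: 1}
+    #     item: {<<: *d, b: 2}
+    # constructs item == {'a': 1, 'b': 2}; keys given explicitly override the
+    # merged ones.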
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return super().construct_mapping(node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
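+
+    # Editorial examples (illustrative, not from upstream PyYAML), following
+    # the branches above: '0b1010' -> 10, '0x1A' -> 26, '017' -> 15 (octal),
+    # '1:30' -> 90 (base-60), '-42' -> -42.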
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ r'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
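+
+    # Editorial examples (illustrative, not from upstream PyYAML):
+    # '2001-12-14' yields a datetime.date, while '2001-12-14 21:59:43.10 -5'
+    # yields a naive datetime.datetime shifted to UTC
+    # (2001-12-15 02:59:43.100000).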
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag,
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ def construct_python_long(self, node):
+ return self.construct_yaml_int(node)
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name, exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if '.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = 'builtins'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name, exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r"
+ % (object_name, module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+                setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created; check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes',
+ Constructor.construct_python_bytes)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
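The registrations above are what give the full `Constructor` (and hence `Loader`) the ability to build arbitrary Python objects from `python/*` tags, while `SafeConstructor` leaves them unregistered. A minimal sketch of the difference, assuming a stock PyYAML-compatible `yaml` package (which this vendored copy mirrors; the exact error type text is illustrative):

```python
import yaml

doc = "!!python/object/apply:collections.OrderedDict [[[a, 1], [b, 2]]]"

# The full Loader resolves the python/object/apply tag: it imports the named
# callable and applies it to the constructed arguments.
obj = yaml.load(doc, Loader=yaml.Loader)
print(type(obj).__name__, dict(obj))        # OrderedDict {'a': 1, 'b': 2}

# SafeLoader has no constructor registered for python/* tags and rejects the
# same document, which is why untrusted input should go through the safe
# variants.
try:
    yaml.load(doc, Loader=yaml.SafeLoader)
except yaml.YAMLError as exc:
    print("rejected:", type(exc).__name__)  # ConstructorError
```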
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
new file mode 100644
index 00000000..e6c16d89
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
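These classes only exist when the `_yaml` libyaml binding can be imported, so callers usually select them with a guarded import and fall back to the pure-Python loaders. A sketch of that conventional pattern (names follow upstream PyYAML):

```python
import yaml

# Prefer the libyaml-backed parser when the _yaml extension is available,
# otherwise fall back to the pure-Python SafeLoader.
try:
    from yaml import CSafeLoader as PreferredLoader
except ImportError:
    from yaml import SafeLoader as PreferredLoader

data = yaml.load("update_every: 5\nretries: 10\n", Loader=PreferredLoader)
print(data)   # {'update_every': 5, 'retries': 10}
```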
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
new file mode 100644
index 00000000..ba590c6e
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
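The keyword arguments accepted by these dumpers are the same ones `yaml.dump()` forwards, so the formatting knobs map one-to-one onto the `Emitter`, `Serializer` and `Representer` parameters above. A small illustrative example (the configuration values are arbitrary):

```python
import yaml

conf = {"jobs": [{"name": "local", "url": "http://127.0.0.1:19999"}]}

text = yaml.dump(conf,
                 Dumper=yaml.SafeDumper,    # SafeRepresenter: plain types only
                 default_flow_style=False,  # block-style collections
                 indent=2, width=72,        # Emitter formatting
                 explicit_start=True)       # Serializer emits the leading '---'
print(text)
```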
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
new file mode 100644
index 00000000..d4be65a8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
@@ -0,0 +1,1138 @@
+# SPDX-License-Identifier: MIT
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis:
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter:
+
+ DEFAULT_TAG_PREFIXES = {
+ '!' : '!',
+ 'tag:yaml.org,2002:' : '!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = '\n'
+ if line_break in ['\r', '\n', '\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for the next few events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator('---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor('&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor('*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator('[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator('{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator('-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == '')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = '!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return '%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError("tag handle must start and end with '!': %r" % handle)
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch, handle))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ch)  # iterating bytes yields ints on Python 3
+ if start < end:
+ chunks.append(prefix[start:end])
+ return ''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == '!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == '!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == '!' and handle != '!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ch)  # iterating bytes yields ints on Python 3
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = ''.join(chunks)
+ if handle:
+ return '%s%s' % (handle, suffix_text)
+ else:
+ return '!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch, anchor))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith('---') or scalar.startswith('...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in '#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in '?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar either.
+ if ch in ',?[]{}':
+ flow_indicators = True
+ if ch == ':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in '\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+ if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == ' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in '\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double-quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write('\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = ' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = ' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = '%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = '%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator('\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != ' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == '\'':
+ data = '\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator('\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ '\0': '0',
+ '\x07': 'a',
+ '\x08': 'b',
+ '\x09': 't',
+ '\x0A': 'n',
+ '\x0B': 'v',
+ '\x0C': 'f',
+ '\x0D': 'r',
+ '\x1B': 'e',
+ '\"': '\"',
+ '\\': '\\',
+ '\x85': 'N',
+ '\xA0': '_',
+ '\u2028': 'L',
+ '\u2029': 'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator('"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+ or not ('\x20' <= ch <= '\x7E'
+ or (self.allow_unicode
+ and ('\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= '\xFF':
+ data = '\\x%02X' % ord(ch)
+ elif ch <= '\uFFFF':
+ data = '\\u%04X' % ord(ch)
+ else:
+ data = '\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == ' ':
+ data = '\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ hints += str(self.best_indent)
+ if text[-1] not in '\n\x85\u2028\u2029':
+ hints += '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ hints += '+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('>'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != ' ' \
+ and text[start] == '\n':
+ self.write_line_break()
+ leading_space = (ch == ' ')
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ spaces = (ch == ' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('|'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in '\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = ' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
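The grammar in the header comment of this file describes the event sequence the emitter expects; feeding it such a sequence by hand shows the state machine at work. A sketch using the top-level `yaml.emit()` helper (the exact output formatting may differ slightly):

```python
import yaml
from yaml.events import (StreamStartEvent, StreamEndEvent,
                         DocumentStartEvent, DocumentEndEvent,
                         MappingStartEvent, MappingEndEvent, ScalarEvent)

# One stream, one explicit document, one block mapping with a single pair --
# exactly the shape of: stream ::= STREAM-START document* STREAM-END, etc.
events = [
    StreamStartEvent(),
    DocumentStartEvent(explicit=True),
    MappingStartEvent(anchor=None, tag=None, implicit=True),
    ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='enabled'),
    ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='yes'),
    MappingEndEvent(),
    DocumentEndEvent(explicit=False),
    StreamEndEvent(),
]

print(yaml.emit(events), end='')   # roughly: ---\nenabled: yes
```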
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
new file mode 100644
index 00000000..5fec7d44
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
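`Mark` and `MarkedYAMLError` are what produce the familiar "line/column plus caret snippet" diagnostics. A quick way to see them in action is to parse a deliberately broken document (the exact message text depends on the PyYAML version):

```python
import yaml

try:
    yaml.safe_load("retries: [1, 2\n")
except yaml.MarkedYAMLError as exc:
    # str(exc) combines context, problem and the caret snippet built by
    # Mark.get_snippet() from the original buffer.
    print(exc.problem)
    print("line", exc.problem_mark.line + 1,
          "column", exc.problem_mark.column + 1)
```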
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
new file mode 100644
index 00000000..283452ad
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: MIT
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
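These event classes are the interface between the parser and the emitter, and the top-level `yaml.parse()` helper exposes the stream directly. For a tiny document the sequence looks like this (a sketch; the trailing comment shows the expected order):

```python
import yaml

for event in yaml.parse("retries: 3\n"):
    print(type(event).__name__, getattr(event, 'value', ''))
# StreamStartEvent, DocumentStartEvent, MappingStartEvent,
# ScalarEvent retries, ScalarEvent 3, MappingEndEvent,
# DocumentEndEvent, StreamEndEvent
```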
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
new file mode 100644
index 00000000..7ef6cf81
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
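Each loader class simply stacks the pipeline stages as mixins: `Reader` feeds characters to the `Scanner`, which feeds tokens to the `Parser`, which feeds events to the `Composer`, whose node graph the constructor turns into Python objects. The intermediate representations can be inspected through the corresponding top-level helpers; a brief sketch:

```python
import yaml

stream = "penalty: no\n"

print(next(yaml.scan(stream)))    # first token      (Scanner)
print(next(yaml.parse(stream)))   # first event      (Parser)
print(yaml.compose(stream))       # node graph       (Composer)
print(yaml.safe_load(stream))     # native objects   (SafeConstructor)
```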
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
new file mode 100644
index 00000000..ed2a1b43
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: MIT
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
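Nodes are the representation graph that sits between events and native objects, with tags already fully resolved. A short example of inspecting the graph produced by `yaml.compose()` (the document content is illustrative):

```python
import yaml

node = yaml.compose("chart: system.load\nvalue: 1.5\n")
print(type(node).__name__, node.tag)    # MappingNode tag:yaml.org,2002:map
for key_node, value_node in node.value:
    print(key_node.value, '->', value_node.tag)
# chart -> tag:yaml.org,2002:str
# value -> tag:yaml.org,2002:float
```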
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
new file mode 100644
index 00000000..bcec7f99
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
@@ -0,0 +1,590 @@
+# SPDX-License-Identifier: MIT
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
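As an aside, the grammar above is exactly what the produced event stream follows, and the parser is normally driven through the `check_event()`/`get_event()` methods defined further down in this file. A small sketch, assuming a PyYAML-compatible `SafeLoader`:

```python
import yaml
from yaml.events import ScalarEvent

# Collect only the scalar values of a document by draining the parser the
# same way the Composer does, via check_event()/get_event().
loader = yaml.SafeLoader("jobs: [local, remote]\n")
scalars = []
while loader.check_event():
    event = loader.get_event()
    if isinstance(event, ScalarEvent):
        scalars.append(event.value)
loader.dispose()
print(scalars)   # ['jobs', 'local', 'remote']
```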
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser:
+ # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ '!': '!',
+ '!!': 'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == 'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle,
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
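+ #
+ # For example (illustration only): the node "!!str &a hello" consists of
+ # properties (a TAG token and an ANCHOR token) followed by SCALAR
+ # content, while "*a" is just an ALIAS and carries no properties or
+ # content of its own.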
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle,
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == '!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), '',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
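+ #
+ # For example (illustration only), the flow sequence "[ one, two: three ]"
+ # is parsed as a two-item sequence whose second item is a single-pair
+ # mapping: parse_flow_sequence_entry emits a MappingStartEvent as soon as
+ # it sees the KEY token produced for "two".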
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), '', mark, mark)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
new file mode 100644
index 00000000..0a515fd6
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: MIT
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position `length` characters ahead.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
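+#
+# A minimal usage sketch (illustration only, not used by the module itself):
+#
+#     r = Reader("a: 1\n")
+#     r.peek()                    # 'a'
+#     r.prefix(4)                 # 'a: 1'
+#     r.forward(3)
+#     r.index, r.line, r.column   # (3, 0, 3)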
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, bytes):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+ # - a file-like object with its `read` method returning `str`,
+# - a file-like object with its `read` method returning a `bytes` object.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, str):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+'\0'
+ elif isinstance(stream, bytes):
+ self.name = "<byte string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' \
+ or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=4096):
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
new file mode 100644
index 00000000..756a18dc
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
@@ -0,0 +1,375 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, sys, copyreg, types, base64
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter:
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
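+
+ # A hypothetical registration sketch (illustration only; the `Point`
+ # class and `represent_point` function are not part of this module).
+ # A representer callable receives the representer instance and the data:
+ #
+ #     class Point:
+ #         def __init__(self, x, y):
+ #             self.x, self.y = x, y
+ #
+ #     def represent_point(representer, data):
+ #         return representer.represent_mapping(
+ #             'tag:yaml.org,2002:map', {'x': data.x, 'y': data.y})
+ #
+ #     SafeRepresenter.add_representer(Point, represent_point)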
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, bytes, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data):
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if '.' not in value and 'e' in value:
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+ SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = '%r' % data.real
+ elif data.real == 0.0:
+ data = '%rj' % data.imag
+ elif data.imag > 0:
+ data = '%r+%rj' % (data.real, data.imag)
+ else:
+ data = '%r%rj' % (data.real, data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = '%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ 'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
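+ #
+ # A rough sketch of the outcome (the `Point` class here is hypothetical,
+ # not part of this module): for a plain instance such as
+ #
+ #     class Point:
+ #         def __init__(self, x, y):
+ #             self.x, self.y = x, y
+ #
+ #     Representer().represent_data(Point(1, 2))
+ #
+ # the result is a MappingNode tagged 'tag:yaml.org,2002:python/object:'
+ # followed by the class's module and name, whose value is the instance
+ # __dict__, i.e. {'x': 1, 'y': 2}.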
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = '%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
new file mode 100644
index 00000000..50945e04
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence item with the index equal to `index_check`.
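+ #
+ # A hedged usage sketch (illustration only; the tag choice and the
+ # 'port' key are arbitrary, and you would normally register on your own
+ # Resolver/Loader subclass):
+ #
+ #     Resolver.add_path_resolver('tag:yaml.org,2002:int', ['port'], str)
+ #
+ # would resolve the scalar value of a top-level 'port' key to !!int
+ # whenever no implicit resolver claims the value first.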
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, str) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (str, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, str):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, str):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == '':
+ resolvers = self.yaml_implicit_resolvers.get('', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:bool',
+ re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:float',
+ re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:int',
+ re.compile(r'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:merge',
+ re.compile(r'^(?:<<)$'),
+ ['<'])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:null',
+ re.compile(r'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:timestamp',
+ re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:value',
+ re.compile(r'^(?:=)$'),
+ ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:yaml',
+ re.compile(r'^(?:!|&|\*)$'),
+ list('!&*'))
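+
+# A few illustrative resolutions (sketch only; the `(True, False)` tuple
+# marks a plain scalar eligible for implicit resolution):
+#
+#     Resolver().resolve(ScalarNode, 'yes', (True, False))    # tag:yaml.org,2002:bool
+#     Resolver().resolve(ScalarNode, '3.14', (True, False))   # tag:yaml.org,2002:float
+#     Resolver().resolve(ScalarNode, 'hello', (True, False))  # tag:yaml.org,2002:str (default)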
+
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
new file mode 100644
index 00000000..b55854e8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
@@ -0,0 +1,1449 @@
+# SPDX-License-Identifier: MIT
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
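+# As a rough illustration (not normative), scanning the document "a: 1"
+# produces approximately this token stream:
+#     STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR('a'),
+#     VALUE, SCALAR('1'), BLOCK-END, STREAM-END
+#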
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey:
+ # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner:
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == '\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token" % ch,
+ self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '---' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '...' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+ and (ch == '-' or (not self.flow_level and ch in '?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == '\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r" % self.peek(),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch, self.get_mark())
+ length = 0
+ while '0' <= self.peek(length) <= '9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == ' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != ' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch, self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == '<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != '>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ elif ch in '\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = '!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = '!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in ' \t'
+ length = 0
+ while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == '\n' \
+ and leading_non_space and self.peek() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == '\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch, self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r" % ch,
+ self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ while self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we loosen indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark the beginning and the end of them. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '\"': '\"',
+ '\\': '\\',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {
+ 'x': 2,
+ 'u': 4,
+ 'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == '\'' and self.peek(1) == '\'':
+ chunks.append('\'')
+ self.forward(2)
+ elif (double and ch == '\'') or (not double and ch in '\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == '\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k)), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(chr(code))
+ self.forward(length)
+ elif ch in '\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch, self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in ' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == '\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in ' \t':
+ self.forward()
+ if self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == '#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in '\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == ':' and
+ self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in ',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == ':'
+ and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == '#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in ' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != '!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != '!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == '%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch, self.get_mark())
+ return ''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ codes = []
+ mark = self.get_mark()
+ while self.peek() == '%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
+ % self.peek(k), self.get_mark())
+ codes.append(int(self.prefix(2), 16))
+ self.forward(2)
+ try:
+ value = bytes(codes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in '\r\n\x85':
+ if self.prefix(2) == '\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.forward()
+ return ch
+ return ''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
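A minimal usage sketch of the scanner added above (not part of the patch itself), assuming the stock PyYAML package, which this vendored pyyaml3 copy mirrors, is importable as yaml:

    import yaml

    # yaml.scan() drives the Scanner and yields the tokens fetched above;
    # each token carries start/end marks, and scalar tokens expose a .value.
    for token in yaml.scan("key: [1, 2]\n"):
        print(type(token).__name__, getattr(token, "value", ""))

Printing the stream this way is a quick check of how fetch_key/fetch_value and the simple-key bookkeeping interact for a given document.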
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
new file mode 100644
index 00000000..1ba2f7f9
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: MIT
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = 'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
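The Serializer above walks a node graph, assigning anchors to shared nodes before emitting events. A short sketch of that behaviour, again assuming the stock PyYAML package (same API as this vendored copy) is available as yaml:

    import yaml

    # Compose a document that reuses one node, then serialize it back;
    # the anchor_node() pass is what re-creates the &anchor/*alias pair.
    node = yaml.compose("x: &a [1, 2]\ny: *a\n")
    print(yaml.serialize(node))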
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
new file mode 100644
index 00000000..c5c4fb11
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: MIT
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
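Each token class above carries a class-level id string that the parser uses in error messages, in addition to the start/end marks stored by Token. A tiny sketch, assuming stock PyYAML is importable as yaml:

    import yaml

    # Print the id of every token produced for a one-line document.
    print([token.id for token in yaml.scan("- item\n")])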
diff --git a/collectors/python.d.plugin/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
new file mode 100644
index 00000000..ec21779a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
@@ -0,0 +1,515 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# client.py - Somewhat higher-level GUI_RPC API for BOINC core client
+#
+# Copyright (C) 2013 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
+# Copyright (C) 2017 Austin S. Hemmelgarn
+#
+# SPDX-License-Identifier: GPL-3.0
+
+# Based on client/boinc_cmd.cpp
+
+import hashlib
+import socket
+import sys
+import time
+from functools import total_ordering
+from xml.etree import ElementTree
+
+GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg"
+
+GUI_RPC_HOSTNAME = None # localhost
+GUI_RPC_PORT = 31416
+GUI_RPC_TIMEOUT = 1
+
+class Rpc(object):
+ ''' Class to perform GUI RPC calls to a BOINC core client.
+ Usage in a context manager ('with' block) is recommended to ensure
+ disconnect() is called. Using the same instance for all calls is also
+ recommended so it reuses the same socket connection
+ '''
+ def __init__(self, hostname="", port=0, timeout=0, text_output=False):
+ self.hostname = hostname
+ self.port = port
+ self.timeout = timeout
+ self.sock = None
+ self.text_output = text_output
+
+ @property
+ def sockargs(self):
+ return (self.hostname, self.port, self.timeout)
+
+ def __enter__(self): self.connect(*self.sockargs); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self, hostname="", port=0, timeout=0):
+ ''' Connect to (hostname, port) with timeout in seconds.
+ Hostname defaults to None (localhost), and port to 31416
+ Calling multiple times will disconnect the previous connection (if
+ any) and (re-)connect to the host.
+ '''
+ if self.sock:
+ self.disconnect()
+
+ self.hostname = hostname or GUI_RPC_HOSTNAME
+ self.port = port or GUI_RPC_PORT
+ self.timeout = timeout or GUI_RPC_TIMEOUT
+
+ self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2])
+
+ def disconnect(self):
+ ''' Disconnect from host. Calling multiple times is OK (idempotent)
+ '''
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+
+ def call(self, request, text_output=None):
+ ''' Do an RPC call. Pack and send the XML request and return the
+ unpacked reply. request can be either plain XML text or an
+ xml.etree.ElementTree.Element object. Return ElementTree.Element
+ or XML text according to text_output flag.
+ Will auto-connect if not connected.
+ '''
+ if text_output is None:
+ text_output = self.text_output
+
+ if not self.sock:
+ self.connect(*self.sockargs)
+
+ if not isinstance(request, ElementTree.Element):
+ request = ElementTree.fromstring(request)
+
+ # pack request
+ end = '\003'
+ if sys.version_info[0] < 3:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end)
+ else:
+ req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode()
+
+ try:
+ self.sock.sendall(req)
+ except (socket.error, socket.herror, socket.gaierror, socket.timeout):
+ raise
+
+ req = ""
+ while True:
+ try:
+ buf = self.sock.recv(8192)
+ if not buf:
+ raise socket.error("No data from socket")
+ if sys.version_info[0] >= 3:
+ buf = buf.decode()
+ except socket.error:
+ raise
+ n = buf.find(end)
+ if not n == -1: break
+ req += buf
+ req += buf[:n]
+
+ # unpack reply (remove root tag, ie: first and last lines)
+ req = '\n'.join(req.strip().rsplit('\n')[1:-1])
+
+ if text_output:
+ return req
+ else:
+ return ElementTree.fromstring(req)
+
+def setattrs_from_xml(obj, xml, attrfuncdict={}):
+ ''' Helper to set values for attributes of a class instance by mapping
+ matching tags from an XML file.
+ attrfuncdict is a dict of functions to customize value data type of
+ each attribute. It falls back to simple int/float/bool/str detection
+ based on values defined in __init__(). This would not be needed if
+ Boinc used a standard RPC protocol that includes the data type in the XML.
+ '''
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+ for e in list(xml):
+ if hasattr(obj, e.tag):
+ attr = getattr(obj, e.tag)
+ attrfunc = attrfuncdict.get(e.tag, None)
+ if attrfunc is None:
+ if isinstance(attr, bool): attrfunc = parse_bool
+ elif isinstance(attr, int): attrfunc = parse_int
+ elif isinstance(attr, float): attrfunc = parse_float
+ elif isinstance(attr, str): attrfunc = parse_str
+ elif isinstance(attr, list): attrfunc = parse_list
+ else: attrfunc = lambda x: x
+ setattr(obj, e.tag, attrfunc(e))
+ else:
+ pass
+ #print "class missing attribute '%s': %r" % (e.tag, obj)
+ return obj
+
+
+def parse_bool(e):
+ ''' Helper to convert ElementTree.Element.text to boolean.
+ Treat '<foo/>' (and '<foo>[[:blank:]]</foo>') as True
+ Treat '0' and 'false' as False
+ '''
+ if e.text is None:
+ return True
+ else:
+ return bool(e.text) and not e.text.strip().lower() in ('0', 'false')
+
+
+def parse_int(e):
+ ''' Helper to convert ElementTree.Element.text to integer.
+ Treat '<foo/>' (and '<foo></foo>') as 0
+ '''
+ # int(float()) allows casting to int a value expressed as float in XML
+ return 0 if e.text is None else int(float(e.text.strip()))
+
+
+def parse_float(e):
+ ''' Helper to convert ElementTree.Element.text to float. '''
+ return 0.0 if e.text is None else float(e.text.strip())
+
+
+def parse_str(e):
+ ''' Helper to convert ElementTree.Element.text to string. '''
+ return "" if e.text is None else e.text.strip()
+
+
+def parse_list(e):
+ ''' Helper to convert ElementTree.Element to list. For now, simply return
+ the list of root element's children
+ '''
+ return list(e)
+
+
+class Enum(object):
+ UNKNOWN = -1 # Not in original API
+
+ @classmethod
+ def name(cls, value):
+ ''' Quick-and-dirty fallback for getting the "name" of an enum item '''
+
+ # value as string, if it matches an enum attribute.
+ # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE)
+ if hasattr(cls, str(value)):
+ return cls.name(getattr(cls, value, None))
+
+ # value not handled in subclass name()
+ for k, v in cls.__dict__.items():
+ if v == value:
+ return k.lower().replace('_', ' ')
+
+ # value not found
+ return cls.name(Enum.UNKNOWN)
+
+
+class CpuSched(Enum):
+ ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state
+ "SCHEDULED" is synonymous with "executing" except when CPU throttling
+ is in use.
+ '''
+ UNINITIALIZED = 0
+ PREEMPTED = 1
+ SCHEDULED = 2
+
+
+class ResultState(Enum):
+ ''' Values of RESULT::state in client.
+ THESE MUST BE IN NUMERICAL ORDER
+ (because of the > comparison in RESULT::computing_done())
+ see html/inc/common_defs.inc
+ '''
+ NEW = 0
+ #// New result
+ FILES_DOWNLOADING = 1
+ #// Input files for result (WU, app version) are being downloaded
+ FILES_DOWNLOADED = 2
+ #// Files are downloaded, result can be (or is being) computed
+ COMPUTE_ERROR = 3
+ #// computation failed; no file upload
+ FILES_UPLOADING = 4
+ #// Output files for result are being uploaded
+ FILES_UPLOADED = 5
+ #// Files are uploaded, notify scheduling server at some point
+ ABORTED = 6
+ #// result was aborted
+ UPLOAD_FAILED = 7
+ #// some output file permanent failure
+
+
+class Process(Enum):
+ ''' values of ACTIVE_TASK::task_state '''
+ UNINITIALIZED = 0
+ #// process doesn't exist yet
+ EXECUTING = 1
+ #// process is running, as far as we know
+ SUSPENDED = 9
+ #// we've sent it a "suspend" message
+ ABORT_PENDING = 5
+ #// process exceeded limits; send "abort" message, waiting to exit
+ QUIT_PENDING = 8
+ #// we've sent it a "quit" message, waiting to exit
+ COPY_PENDING = 10
+ #// waiting for async file copies to finish
+
+
+class _Struct(object):
+ ''' base helper class with common methods for all classes derived from
+ BOINC's C++ structs
+ '''
+ @classmethod
+ def parse(cls, xml):
+ return setattrs_from_xml(cls(), xml)
+
+ def __str__(self, indent=0):
+ buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if isinstance(value, list):
+ buf += '{0}\t{1} [\n'.format('\t' * indent, attr)
+ for v in value: buf += '\t\t{0}\t\t,\n'.format(v)
+ buf += '\t]\n'
+ else:
+ buf += '{0}\t{1}\t{2}\n'.format('\t' * indent,
+ attr,
+ value.__str__(indent+2)
+ if isinstance(value, _Struct)
+ else repr(value))
+ return buf
+
+
+@total_ordering
+class VersionInfo(_Struct):
+ def __init__(self, major=0, minor=0, release=0):
+ self.major = major
+ self.minor = minor
+ self.release = release
+
+ @property
+ def _tuple(self):
+ return (self.major, self.minor, self.release)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self._tuple == other._tuple
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __gt__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self._tuple > other._tuple
+
+ def __str__(self):
+ return "{0}.{1}.{2}".format(self.major, self.minor, self.release)
+
+ def __repr__(self):
+ return "{0}{1}".format(self.__class__.__name__, self._tuple)
+
+
+class Result(_Struct):
+ ''' Also called "task" in some contexts '''
+ def __init__(self):
+ # Names and values follow lib/gui_rpc_client.h @ RESULT
+ # Order too, except when grouping contradicts client/result.cpp
+ # RESULT::write_gui(), then XML order is used.
+
+ self.name = ""
+ self.wu_name = ""
+ self.version_num = 0
+ #// identifies the app used
+ self.plan_class = ""
+ self.project_url = "" # from PROJECT.master_url
+ self.report_deadline = 0.0 # seconds since epoch
+ self.received_time = 0.0 # seconds since epoch
+ #// when we got this from server
+ self.ready_to_report = False
+ #// we're ready to report this result to the server;
+ #// either computation is done and all the files have been uploaded
+ #// or there was an error
+ self.got_server_ack = False
+ #// we've received the ack for this result from the server
+ self.final_cpu_time = 0.0
+ self.final_elapsed_time = 0.0
+ self.state = ResultState.NEW
+ self.estimated_cpu_time_remaining = 0.0
+ #// actually, estimated elapsed time remaining
+ self.exit_status = 0
+ #// return value from the application
+ self.suspended_via_gui = False
+ self.project_suspended_via_gui = False
+ self.edf_scheduled = False
+ #// temporary used to tell GUI that this result is deadline-scheduled
+ self.coproc_missing = False
+ #// a coproc needed by this job is missing
+ #// (e.g. because user removed their GPU board).
+ self.scheduler_wait = False
+ self.scheduler_wait_reason = ""
+ self.network_wait = False
+ self.resources = ""
+ #// textual description of resources used
+
+ #// the following defined if active
+ # XML is generated in client/app.cpp ACTIVE_TASK::write_gui()
+ self.active_task = False
+ self.active_task_state = Process.UNINITIALIZED
+ self.app_version_num = 0
+ self.slot = -1
+ self.pid = 0
+ self.scheduler_state = CpuSched.UNINITIALIZED
+ self.checkpoint_cpu_time = 0.0
+ self.current_cpu_time = 0.0
+ self.fraction_done = 0.0
+ self.elapsed_time = 0.0
+ self.swap_size = 0
+ self.working_set_size_smoothed = 0.0
+ self.too_large = False
+ self.needs_shmem = False
+ self.graphics_exec_path = ""
+ self.web_graphics_url = ""
+ self.remote_desktop_addr = ""
+ self.slot_path = ""
+ #// only present if graphics_exec_path is
+
+ # The following are not in original API, but are present in RPC XML reply
+ self.completed_time = 0.0
+ #// time when ready_to_report was set
+ self.report_immediately = False
+ self.working_set_size = 0
+ self.page_fault_rate = 0.0
+ #// derived by higher-level code
+
+ # The following are in API, but are NEVER in RPC XML reply. Go figure
+ self.signal = 0
+
+ self.app = None # APP*
+ self.wup = None # WORKUNIT*
+ self.project = None # PROJECT*
+ self.avp = None # APP_VERSION*
+
+ @classmethod
+ def parse(cls, xml):
+ if not isinstance(xml, ElementTree.Element):
+ xml = ElementTree.fromstring(xml)
+
+ # parse main XML
+ result = super(Result, cls).parse(xml)
+
+ # parse '<active_task>' children
+ active_task = xml.find('active_task')
+ if active_task is None:
+ result.active_task = False # already the default after __init__()
+ else:
+ result.active_task = True # already the default after main parse
+ result = setattrs_from_xml(result, active_task)
+
+ #// if CPU time is nonzero but elapsed time is zero,
+ #// we must be talking to an old client.
+ #// Set elapsed = CPU
+ #// (easier to deal with this here than in the manager)
+ if result.current_cpu_time != 0 and result.elapsed_time == 0:
+ result.elapsed_time = result.current_cpu_time
+
+ if result.final_cpu_time != 0 and result.final_elapsed_time == 0:
+ result.final_elapsed_time = result.final_cpu_time
+
+ return result
+
+ def __str__(self):
+ buf = '{0}:\n'.format(self.__class__.__name__)
+ for attr in self.__dict__:
+ value = getattr(self, attr)
+ if attr in ['received_time', 'report_deadline']:
+ value = time.ctime(value)
+ buf += '\t{0}\t{1}\n'.format(attr, value)
+ return buf
+
+
+class BoincClient(object):
+
+ def __init__(self, host="", port=0, passwd=None):
+ self.hostname = host
+ self.port = port
+ self.passwd = passwd
+ self.rpc = Rpc(text_output=False)
+ self.version = None
+ self.authorized = False
+
+ # Informative, not authoritative. Records status of *last* RPC call,
+ # but does not infer success about the *next* one.
+ # Thus, it should be read *after* an RPC call, not prior to one
+ self.connected = False
+
+ def __enter__(self): self.connect(); return self
+ def __exit__(self, *args): self.disconnect()
+
+ def connect(self):
+ try:
+ self.rpc.connect(self.hostname, self.port)
+ self.connected = True
+ except socket.error:
+ self.connected = False
+ return
+ self.authorized = self.authorize(self.passwd)
+ self.version = self.exchange_versions()
+
+ def disconnect(self):
+ self.rpc.disconnect()
+
+ def authorize(self, password):
+ ''' Request authorization. If password is None and we are connecting
+ to localhost, try to read password from the local config file
+ GUI_RPC_PASSWD_FILE. If file can't be read (not found or no
+ permission to read), try to authorize with a blank password.
+ If authorization is requested and fails, all subsequent calls
+ will be refused with socket.error 'Connection reset by peer' (104).
+ Since most local calls do not require authorization, do not attempt
+ it if you're not sure about the password.
+ '''
+ if password is None and not self.hostname:
+ password = read_gui_rpc_password() or ""
+ nonce = self.rpc.call('<auth1/>').text
+ authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower()
+ reply = self.rpc.call('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(authhash))
+
+ if reply.tag == 'authorized':
+ return True
+ else:
+ return False
+
+ def exchange_versions(self):
+ ''' Return VersionInfo instance with core client version info '''
+ return VersionInfo.parse(self.rpc.call('<exchange_versions/>'))
+
+ def get_tasks(self):
+ ''' Same as get_results(active_only=False) '''
+ return self.get_results(False)
+
+ def get_results(self, active_only=False):
+ ''' Get a list of results.
+ Those that are in progress will have information such as CPU time
+ and fraction done. Each result includes a name;
+ use CC_STATE::lookup_result() to find this result in the current static state;
+ if it's not there, call get_state() again.
+ '''
+ reply = self.rpc.call("<get_results><active_only>{0}</active_only></get_results>".format(1 if active_only else 0))
+ if not reply.tag == 'results':
+ return []
+
+ results = []
+ for item in list(reply):
+ results.append(Result.parse(item))
+
+ return results
+
+
+def read_gui_rpc_password():
+ ''' Read the password string from GUI_RPC_PASSWD_FILE, trim the trailing
+ newline (if any), and return it
+ '''
+ try:
+ with open(GUI_RPC_PASSWD_FILE, 'r') as f:
+ buf = f.read()
+ if buf.endswith('\n'): return buf[:-1] # trim trailing newline
+ else: return buf
+ except IOError:
+ # Permission denied or File not found.
+ pass
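A hedged usage sketch for the classes defined above; the import path and the presence of a local BOINC core client listening on the default port are assumptions, not something this patch sets up:

    from third_party.boinc_client import BoincClient  # assumes python_modules/ is on sys.path

    # Connect, authenticate with the local gui_rpc_auth.cfg password when
    # available, and list the current tasks with their progress.
    with BoincClient() as client:
        if client.connected:
            for task in client.get_tasks():
                print(task.name, task.fraction_done)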
diff --git a/collectors/python.d.plugin/python_modules/third_party/filelock.py b/collectors/python.d.plugin/python_modules/third_party/filelock.py
new file mode 100644
index 00000000..4c981672
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/filelock.py
@@ -0,0 +1,451 @@
+# This is free and unencumbered software released into the public domain.
+#
+# Anyone is free to copy, modify, publish, use, compile, sell, or
+# distribute this software, either in source code form or as a compiled
+# binary, for any purpose, commercial or non-commercial, and by any
+# means.
+#
+# In jurisdictions that recognize copyright laws, the author or authors
+# of this software dedicate any and all copyright interest in the
+# software to the public domain. We make this dedication for the benefit
+# of the public at large and to the detriment of our heirs and
+# successors. We intend this dedication to be an overt act of
+# relinquishment in perpetuity of all present and future rights to this
+# software under copyright law.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# For more information, please refer to <http://unlicense.org>
+
+"""
+A platform independent file lock that supports the with-statement.
+"""
+
+
+# Modules
+# ------------------------------------------------
+import logging
+import os
+import threading
+import time
+try:
+ import warnings
+except ImportError:
+ warnings = None
+
+try:
+ import msvcrt
+except ImportError:
+ msvcrt = None
+
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+
+
+# Backward compatibility
+# ------------------------------------------------
+try:
+ TimeoutError
+except NameError:
+ TimeoutError = OSError
+
+
+# Data
+# ------------------------------------------------
+__all__ = [
+ "Timeout",
+ "BaseFileLock",
+ "WindowsFileLock",
+ "UnixFileLock",
+ "SoftFileLock",
+ "FileLock"
+]
+
+__version__ = "3.0.12"
+
+
+_logger = None
+def logger():
+ """Returns the logger instance used in this module."""
+ global _logger
+ _logger = _logger or logging.getLogger(__name__)
+ return _logger
+
+
+# Exceptions
+# ------------------------------------------------
+class Timeout(TimeoutError):
+ """
+ Raised when the lock could not be acquired in *timeout*
+ seconds.
+ """
+
+ def __init__(self, lock_file):
+ """
+ """
+ #: The path of the file lock.
+ self.lock_file = lock_file
+ return None
+
+ def __str__(self):
+ temp = "The file lock '{}' could not be acquired."\
+ .format(self.lock_file)
+ return temp
+
+
+# Classes
+# ------------------------------------------------
+
+# This is a helper class which is returned by :meth:`BaseFileLock.acquire`
+# and wraps the lock to make sure __enter__ is not called twice when entering
+# the with statement.
+# If we simply returned *self*, the lock would be acquired again
+# in the *__enter__* method of the BaseFileLock, but not released again
+# automatically.
+#
+# :seealso: issue #37 (memory leak)
+class _Acquire_ReturnProxy(object):
+
+ def __init__(self, lock):
+ self.lock = lock
+ return None
+
+ def __enter__(self):
+ return self.lock
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.lock.release()
+ return None
+
+
+class BaseFileLock(object):
+ """
+ Implements the base class of a file lock.
+ """
+
+ def __init__(self, lock_file, timeout = -1):
+ """
+ """
+ # The path to the lock file.
+ self._lock_file = lock_file
+
+ # The file descriptor for the *_lock_file* as it is returned by the
+ # os.open() function.
+ # This file descriptor is not None only while the object holds the
+ # lock.
+ self._lock_file_fd = None
+
+ # The default timeout value.
+ self.timeout = timeout
+
+ # We use this lock primarily for the lock counter.
+ self._thread_lock = threading.Lock()
+
+ # The lock counter is used for implementing the nested locking
+ # mechanism. Whenever the lock is acquired, the counter is increased and
+ # the lock is only released, when this value is 0 again.
+ self._lock_counter = 0
+ return None
+
+ @property
+ def lock_file(self):
+ """
+ The path to the lock file.
+ """
+ return self._lock_file
+
+ @property
+ def timeout(self):
+ """
+ You can set a default timeout for the filelock. It is used as the
+ fallback value in the acquire method if no timeout value (*None*) is
+ given.
+
+ If you want to disable the timeout, set it to a negative value.
+
+ A timeout of 0 means that there is exactly one attempt to acquire the
+ file lock.
+
+ .. versionadded:: 2.0.0
+ """
+ return self._timeout
+
+ @timeout.setter
+ def timeout(self, value):
+ """
+ """
+ self._timeout = float(value)
+ return None
+
+ # Platform dependent locking
+ # --------------------------------------------
+
+ def _acquire(self):
+ """
+ Platform dependent. If the file lock could be
+ acquired, self._lock_file_fd holds the file descriptor
+ of the lock file.
+ """
+ raise NotImplementedError()
+
+ def _release(self):
+ """
+ Releases the lock and sets self._lock_file_fd to None.
+ """
+ raise NotImplementedError()
+
+ # Platform independent methods
+ # --------------------------------------------
+
+ @property
+ def is_locked(self):
+ """
+ True, if the object holds the file lock.
+
+ .. versionchanged:: 2.0.0
+
+ This was previously a method and is now a property.
+ """
+ return self._lock_file_fd is not None
+
+ def acquire(self, timeout=None, poll_intervall=0.05):
+ """
+ Acquires the file lock or fails with a :exc:`Timeout` error.
+
+ .. code-block:: python
+
+ # You can use this method in the context manager (recommended)
+ with lock.acquire():
+ pass
+
+ # Or use an equivalent try-finally construct:
+ lock.acquire()
+ try:
+ pass
+ finally:
+ lock.release()
+
+ :arg float timeout:
+ The maximum time waited for the file lock.
+ If ``timeout < 0``, there is no timeout and this method will
+ block until the lock could be acquired.
+ If ``timeout`` is None, the default :attr:`~timeout` is used.
+
+ :arg float poll_intervall:
+ We check once in *poll_intervall* seconds if we can acquire the
+ file lock.
+
+ :raises Timeout:
+ if the lock could not be acquired in *timeout* seconds.
+
+ .. versionchanged:: 2.0.0
+
+ This method now returns a *proxy* object instead of *self*,
+ so that it can be used in a with statement without side effects.
+ """
+ # Use the default timeout, if no timeout is provided.
+ if timeout is None:
+ timeout = self.timeout
+
+ # Increment the number right at the beginning.
+ # We can still undo it, if something fails.
+ with self._thread_lock:
+ self._lock_counter += 1
+
+ lock_id = id(self)
+ lock_filename = self._lock_file
+ start_time = time.time()
+ try:
+ while True:
+ with self._thread_lock:
+ if not self.is_locked:
+ logger().debug('Attempting to acquire lock %s on %s', lock_id, lock_filename)
+ self._acquire()
+
+ if self.is_locked:
+ logger().info('Lock %s acquired on %s', lock_id, lock_filename)
+ break
+ elif timeout >= 0 and time.time() - start_time > timeout:
+ logger().debug('Timeout on acquiring lock %s on %s', lock_id, lock_filename)
+ raise Timeout(self._lock_file)
+ else:
+ logger().debug(
+ 'Lock %s not acquired on %s, waiting %s seconds ...',
+ lock_id, lock_filename, poll_intervall
+ )
+ time.sleep(poll_intervall)
+ except:
+ # Something did go wrong, so decrement the counter.
+ with self._thread_lock:
+ self._lock_counter = max(0, self._lock_counter - 1)
+
+ raise
+ return _Acquire_ReturnProxy(lock = self)
+
+ def release(self, force = False):
+ """
+ Releases the file lock.
+
+ Please note that the lock is only completely released if the lock
+ counter is 0.
+
+ Also note that the lock file itself is not automatically deleted.
+
+ :arg bool force:
+ If true, the lock counter is ignored and the lock is released in
+ every case.
+ """
+ with self._thread_lock:
+
+ if self.is_locked:
+ self._lock_counter -= 1
+
+ if self._lock_counter == 0 or force:
+ lock_id = id(self)
+ lock_filename = self._lock_file
+
+ logger().debug('Attempting to release lock %s on %s', lock_id, lock_filename)
+ self._release()
+ self._lock_counter = 0
+ logger().info('Lock %s released on %s', lock_id, lock_filename)
+
+ return None
+
+ def __enter__(self):
+ self.acquire()
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.release()
+ return None
+
+ def __del__(self):
+ self.release(force = True)
+ return None
+
+
+# Windows locking mechanism
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+class WindowsFileLock(BaseFileLock):
+ """
+ Uses the :func:`msvcrt.locking` function to hard lock the lock file on
+ windows systems.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+
+ try:
+ fd = os.open(self._lock_file, open_mode)
+ except OSError:
+ pass
+ else:
+ try:
+ msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
+ except (IOError, OSError):
+ os.close(fd)
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ fd = self._lock_file_fd
+ self._lock_file_fd = None
+ msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
+ os.close(fd)
+
+ try:
+ os.remove(self._lock_file)
+ # Probably another instance of the application
+ # that acquired the file lock.
+ except OSError:
+ pass
+ return None
+
+# Unix locking mechanism
+# ~~~~~~~~~~~~~~~~~~~~~~
+
+class UnixFileLock(BaseFileLock):
+ """
+ Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
+ fd = os.open(self._lock_file, open_mode)
+
+ try:
+ fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except (IOError, OSError):
+ os.close(fd)
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ # Do not remove the lockfile:
+ #
+ # https://github.com/benediktschmitt/py-filelock/issues/31
+ # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
+ fd = self._lock_file_fd
+ self._lock_file_fd = None
+ fcntl.flock(fd, fcntl.LOCK_UN)
+ os.close(fd)
+ return None
+
+# Soft lock
+# ~~~~~~~~~
+
+class SoftFileLock(BaseFileLock):
+ """
+ Simply watches the existence of the lock file.
+ """
+
+ def _acquire(self):
+ open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
+ try:
+ fd = os.open(self._lock_file, open_mode)
+ except (IOError, OSError):
+ pass
+ else:
+ self._lock_file_fd = fd
+ return None
+
+ def _release(self):
+ os.close(self._lock_file_fd)
+ self._lock_file_fd = None
+
+ try:
+ os.remove(self._lock_file)
+ # The file is already deleted and that's what we want.
+ except OSError:
+ pass
+ return None
+
+
+# Platform filelock
+# ~~~~~~~~~~~~~~~~~
+
+#: Alias for the lock, which should be used for the current platform. On
+#: Windows, this is an alias for :class:`WindowsFileLock`, on Unix for
+#: :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`.
+FileLock = None
+
+if msvcrt:
+ FileLock = WindowsFileLock
+elif fcntl:
+ FileLock = UnixFileLock
+else:
+ FileLock = SoftFileLock
+
+ if warnings is not None:
+ warnings.warn("only soft file lock is available")
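A minimal sketch of how the FileLock alias defined above is typically used; the import path and lock file path are illustrative assumptions:

    from third_party.filelock import FileLock  # assumes python_modules/ is on sys.path

    lock = FileLock("/tmp/netdata-example.lock")

    # acquire() returns a proxy, so the lock is released when the block exits;
    # repeated acquires on the same object nest via the internal lock counter.
    with lock.acquire(timeout=10):
        pass  # critical section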
diff --git a/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
new file mode 100644
index 00000000..f873eac8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
@@ -0,0 +1,327 @@
+# SPDX-License-Identifier: LGPL-2.1
+"""
+@package sensors.py
+Python Bindings for libsensors3
+
+use the documentation of libsensors for the low level API.
+see example.py for high level API usage.
+
+@author: Pavel Rojtberg (http://www.rojtberg.net)
+@see: https://github.com/paroj/sensors.py
+@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1>
+"""
+
+from ctypes import *
+import ctypes.util
+
+_libc = cdll.LoadLibrary(ctypes.util.find_library("c"))
+# see https://github.com/paroj/sensors.py/issues/1
+_libc.free.argtypes = [c_void_p]
+
+_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors"))
+
+version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii")
+
+
+class SensorsError(Exception):
+ pass
+
+
+class ErrorWildcards(SensorsError):
+ pass
+
+
+class ErrorNoEntry(SensorsError):
+ pass
+
+
+class ErrorAccessRead(SensorsError, OSError):
+ pass
+
+
+class ErrorKernel(SensorsError, OSError):
+ pass
+
+
+class ErrorDivZero(SensorsError, ZeroDivisionError):
+ pass
+
+
+class ErrorChipName(SensorsError):
+ pass
+
+
+class ErrorBusName(SensorsError):
+ pass
+
+
+class ErrorParse(SensorsError):
+ pass
+
+
+class ErrorAccessWrite(SensorsError, OSError):
+ pass
+
+
+class ErrorIO(SensorsError, IOError):
+ pass
+
+
+class ErrorRecursion(SensorsError):
+ pass
+
+
+_ERR_MAP = {
+ 1: ErrorWildcards,
+ 2: ErrorNoEntry,
+ 3: ErrorAccessRead,
+ 4: ErrorKernel,
+ 5: ErrorDivZero,
+ 6: ErrorChipName,
+ 7: ErrorBusName,
+ 8: ErrorParse,
+ 9: ErrorAccessWrite,
+ 10: ErrorIO,
+ 11: ErrorRecursion
+}
+
+
+def raise_sensor_error(errno, message=''):
+ raise _ERR_MAP[abs(errno)](message)
+
+
+class bus_id(Structure):
+ _fields_ = [("type", c_short),
+ ("nr", c_short)]
+
+
+class chip_name(Structure):
+ _fields_ = [("prefix", c_char_p),
+ ("bus", bus_id),
+ ("addr", c_int),
+ ("path", c_char_p)]
+
+
+class feature(Structure):
+ _fields_ = [("name", c_char_p),
+ ("number", c_int),
+ ("type", c_int)]
+
+ # sensors_feature_type
+ IN = 0x00
+ FAN = 0x01
+ TEMP = 0x02
+ POWER = 0x03
+ ENERGY = 0x04
+ CURR = 0x05
+ HUMIDITY = 0x06
+ MAX_MAIN = 0x7
+ VID = 0x10
+ INTRUSION = 0x11
+ MAX_OTHER = 0x12
+ BEEP_ENABLE = 0x18
+
+
+class subfeature(Structure):
+ _fields_ = [("name", c_char_p),
+ ("number", c_int),
+ ("type", c_int),
+ ("mapping", c_int),
+ ("flags", c_uint)]
+
+
+_hdl.sensors_get_detected_chips.restype = POINTER(chip_name)
+_hdl.sensors_get_features.restype = POINTER(feature)
+_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature)
+_hdl.sensors_get_label.restype = c_void_p # return pointer instead of str so we can free it
+_hdl.sensors_get_adapter_name.restype = c_char_p # docs do not say whether to free this or not
+_hdl.sensors_strerror.restype = c_char_p
+
+### RAW API ###
+MODE_R = 1
+MODE_W = 2
+COMPUTE_MAPPING = 4
+
+
+def init(cfg_file=None):
+    file = _libc.fopen(cfg_file.encode("utf-8"), b"r") if cfg_file is not None else None
+
+ result = _hdl.sensors_init(file)
+ if result != 0:
+ raise_sensor_error(result, "sensors_init failed")
+
+ if file is not None:
+ _libc.fclose(file)
+
+
+def cleanup():
+ _hdl.sensors_cleanup()
+
+
+def parse_chip_name(orig_name):
+ ret = chip_name()
+ err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), byref(ret))
+
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+ return ret
+
+
+def strerror(errnum):
+ return _hdl.sensors_strerror(errnum).decode("utf-8")
+
+
+def free_chip_name(chip):
+ _hdl.sensors_free_chip_name(byref(chip))
+
+
+def get_detected_chips(match, nr):
+ """
+ @return: (chip, next nr to query)
+ """
+ _nr = c_int(nr)
+
+ if match is not None:
+ match = byref(match)
+
+ chip = _hdl.sensors_get_detected_chips(match, byref(_nr))
+ chip = chip.contents if bool(chip) else None
+ return chip, _nr.value
+
+
+def chip_snprintf_name(chip, buffer_size=200):
+ """
+ @param buffer_size defaults to the size used in the sensors utility
+ """
+ ret = create_string_buffer(buffer_size)
+ err = _hdl.sensors_snprintf_chip_name(ret, buffer_size, byref(chip))
+
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+ return ret.value.decode("utf-8")
+
+
+def do_chip_sets(chip):
+ """
+ @attention this function was not tested
+ """
+ err = _hdl.sensors_do_chip_sets(byref(chip))
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+
+def get_adapter_name(bus):
+ return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8")
+
+
+def get_features(chip, nr):
+ """
+ @return: (feature, next nr to query)
+ """
+ _nr = c_int(nr)
+ feature = _hdl.sensors_get_features(byref(chip), byref(_nr))
+ feature = feature.contents if bool(feature) else None
+ return feature, _nr.value
+
+
+def get_label(chip, feature):
+ ptr = _hdl.sensors_get_label(byref(chip), byref(feature))
+ val = cast(ptr, c_char_p).value.decode("utf-8")
+ _libc.free(ptr)
+ return val
+
+
+def get_all_subfeatures(chip, feature, nr):
+ """
+ @return: (subfeature, next nr to query)
+ """
+ _nr = c_int(nr)
+ subfeature = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(_nr))
+ subfeature = subfeature.contents if bool(subfeature) else None
+ return subfeature, _nr.value
+
+
+def get_value(chip, subfeature_nr):
+ val = c_double()
+ err = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(val))
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+ return val.value
+
+
+def set_value(chip, subfeature_nr, value):
+ """
+ @attention this function was not tested
+ """
+ val = c_double(value)
+ err = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val))
+ if err < 0:
+ raise_sensor_error(err, strerror(err))
+
+
+### Convenience API ###
+class ChipIterator:
+ def __init__(self, match=None):
+ self.match = parse_chip_name(match) if match is not None else None
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ chip, self.nr = get_detected_chips(self.match, self.nr)
+
+ if chip is None:
+ raise StopIteration
+
+ return chip
+
+ def __del__(self):
+ if self.match is not None:
+ free_chip_name(self.match)
+
+    def next(self):  # python2 compatibility
+ return self.__next__()
+
+
+class FeatureIterator:
+ def __init__(self, chip):
+ self.chip = chip
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ feature, self.nr = get_features(self.chip, self.nr)
+
+ if feature is None:
+ raise StopIteration
+
+ return feature
+
+    def next(self):  # python2 compatibility
+ return self.__next__()
+
+
+class SubFeatureIterator:
+ def __init__(self, chip, feature):
+ self.chip = chip
+ self.feature = feature
+ self.nr = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr)
+
+ if subfeature is None:
+ raise StopIteration
+
+ return subfeature
+
+    def next(self):  # python2 compatibility
+ return self.__next__()
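+
+
+# Example usage (an illustrative sketch, not part of the upstream sensors.py
+# bindings): walk every detected chip and print a labelled value for each
+# subfeature, using only the functions and iterators defined above.
+if __name__ == '__main__':
+    init()  # optionally pass the path of a sensors3.conf file
+    try:
+        for chip in ChipIterator():
+            print(chip_snprintf_name(chip))
+            for feature in FeatureIterator(chip):
+                label = get_label(chip, feature)
+                for subfeature in SubFeatureIterator(chip, feature):
+                    value = get_value(chip, subfeature.number)
+                    print("  %s [%s] = %.3f" % (label, subfeature.name.decode("utf-8"), value))
+    finally:
+        cleanup()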
diff --git a/collectors/python.d.plugin/python_modules/third_party/mcrcon.py b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
new file mode 100644
index 00000000..a65a304b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
@@ -0,0 +1,74 @@
+# Minecraft Remote Console module.
+#
+# Copyright (C) 2015 Barnaby Gale
+#
+# SPDX-License-Identifier: MIT
+
+import socket
+import select
+import struct
+import time
+
+
+class MCRconException(Exception):
+ pass
+
+
+class MCRcon(object):
+ socket = None
+
+ def connect(self, host, port, password):
+ if self.socket is not None:
+ raise MCRconException("Already connected")
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.socket.settimeout(0.9)
+ self.socket.connect((host, port))
+ self.send(3, password)
+
+ def disconnect(self):
+ if self.socket is None:
+ raise MCRconException("Already disconnected")
+ self.socket.close()
+ self.socket = None
+
+ def read(self, length):
+ data = b""
+ while len(data) < length:
+ data += self.socket.recv(length - len(data))
+ return data
+
+ def send(self, out_type, out_data):
+ if self.socket is None:
+ raise MCRconException("Must connect before sending data")
+
+ # Send a request packet
+ out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
+ out_length = struct.pack('<i', len(out_payload))
+ self.socket.send(out_length + out_payload)
+
+ # Read response packets
+ in_data = ""
+ while True:
+ # Read a packet
+ in_length, = struct.unpack('<i', self.read(4))
+ in_payload = self.read(in_length)
+            in_id, in_type = struct.unpack('<ii', in_payload[:8])  # response id and packet type
+ in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
+
+ # Sanity checks
+ if in_padding != b'\x00\x00':
+ raise MCRconException("Incorrect padding")
+ if in_id == -1:
+ raise MCRconException("Login failed")
+
+ # Record the response
+ in_data += in_data_partial.decode('utf8')
+
+ # If there's nothing more to receive, return the response
+ if len(select.select([self.socket], [], [], 0)[0]) == 0:
+ return in_data
+
+ def command(self, command):
+ result = self.send(2, command)
+ time.sleep(0.003) # MC-72390 workaround
+ return result
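+
+
+# Example usage (an illustrative sketch, not part of the upstream module):
+# the host, port and password below are placeholders for a real Minecraft
+# server with RCON enabled.
+if __name__ == '__main__':
+    rcon = MCRcon()
+    rcon.connect('127.0.0.1', 25575, 'rcon-password')
+    try:
+        print(rcon.command('list'))
+    finally:
+        rcon.disconnect()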
diff --git a/collectors/python.d.plugin/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
new file mode 100644
index 00000000..4ebd556c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: Apache-2.0
+"""
+ monotonic
+ ~~~~~~~~~
+
+ This module provides a ``monotonic()`` function which returns the
+ value (in fractional seconds) of a clock which never goes backwards.
+
+ On Python 3.3 or newer, ``monotonic`` will be an alias of
+ ``time.monotonic`` from the standard library. On older versions,
+ it will fall back to an equivalent implementation:
+
+ +-------------+----------------------------------------+
+ | Linux, BSD | ``clock_gettime(3)`` |
+ +-------------+----------------------------------------+
+ | Windows | ``GetTickCount`` or ``GetTickCount64`` |
+ +-------------+----------------------------------------+
+ | OS X | ``mach_absolute_time`` |
+ +-------------+----------------------------------------+
+
+ If no suitable implementation exists for the current platform,
+ attempting to import this module (or to import from it) will
+ cause a ``RuntimeError`` exception to be raised.
+
+
+ Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+"""
+import time
+
+
+__all__ = ('monotonic',)
+
+
+try:
+ monotonic = time.monotonic
+except AttributeError:
+ import ctypes
+ import ctypes.util
+ import os
+ import sys
+ import threading
+
+
+ def clock_clock_gettime_c_library():
+ return ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_rt_library():
+ return ctypes.CDLL(ctypes.util.find_library('rt'), use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_c_library_synology6():
+ return ctypes.CDLL('/usr/lib/libc.so.6', use_errno=True).clock_gettime
+
+
+ def clock_clock_gettime_rt_library_synology6():
+ return ctypes.CDLL('/usr/lib/librt.so.1', use_errno=True).clock_gettime
+
+
+ def clock_gettime_linux():
+ # see https://github.com/netdata/netdata/issues/7976
+ order = [
+ clock_clock_gettime_c_library,
+ clock_clock_gettime_rt_library,
+ clock_clock_gettime_c_library_synology6,
+ clock_clock_gettime_rt_library_synology6,
+ ]
+
+ for gettime in order:
+ try:
+ return gettime()
+ except (RuntimeError, AttributeError, OSError):
+ continue
+ raise RuntimeError('can not find c and rt libraries')
+
+
+ try:
+ if sys.platform == 'darwin': # OS X, iOS
+ # See Technical Q&A QA1398 of the Mac Developer Library:
+ # <https://developer.apple.com/library/mac/qa/qa1398/>
+ libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
+
+ class mach_timebase_info_data_t(ctypes.Structure):
+ """System timebase info. Defined in <mach/mach_time.h>."""
+ _fields_ = (('numer', ctypes.c_uint32),
+ ('denom', ctypes.c_uint32))
+
+ mach_absolute_time = libc.mach_absolute_time
+ mach_absolute_time.restype = ctypes.c_uint64
+
+ timebase = mach_timebase_info_data_t()
+ libc.mach_timebase_info(ctypes.byref(timebase))
+ ticks_per_second = timebase.numer / timebase.denom * 1.0e9
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return mach_absolute_time() / ticks_per_second
+
+ elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
+ if sys.platform.startswith('cygwin'):
+ # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
+ # version 1.7.6. Using raw WinAPI for maximum version compatibility.
+
+ # Ugly hack using the wrong calling convention (in 32-bit mode)
+ # because ctypes has no windll under cygwin (and it also seems that
+ # the code letting you select stdcall in _ctypes doesn't exist under
+ # the preprocessor definitions relevant to cygwin).
+ # This is 'safe' because:
+ # 1. The ABI of GetTickCount and GetTickCount64 is identical for
+ # both calling conventions because they both have no parameters.
+ # 2. libffi masks the problem because after making the call it doesn't
+ # touch anything through esp and epilogue code restores a correct
+ # esp from ebp afterwards.
+ try:
+ kernel32 = ctypes.cdll.kernel32
+ except OSError: # 'No such file or directory'
+ kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
+ else:
+ kernel32 = ctypes.windll.kernel32
+
+ GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
+ if GetTickCount64:
+ # Windows Vista / Windows Server 2008 or newer.
+ GetTickCount64.restype = ctypes.c_ulonglong
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ return GetTickCount64() / 1000.0
+
+ else:
+ # Before Windows Vista.
+ GetTickCount = kernel32.GetTickCount
+ GetTickCount.restype = ctypes.c_uint32
+
+ get_tick_count_lock = threading.Lock()
+ get_tick_count_last_sample = 0
+ get_tick_count_wraparounds = 0
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ global get_tick_count_last_sample
+ global get_tick_count_wraparounds
+
+ with get_tick_count_lock:
+ current_sample = GetTickCount()
+ if current_sample < get_tick_count_last_sample:
+ get_tick_count_wraparounds += 1
+ get_tick_count_last_sample = current_sample
+
+ final_milliseconds = get_tick_count_wraparounds << 32
+ final_milliseconds += get_tick_count_last_sample
+ return final_milliseconds / 1000.0
+
+ else:
+ clock_gettime = clock_gettime_linux()
+
+ class timespec(ctypes.Structure):
+ """Time specification, as described in clock_gettime(3)."""
+ _fields_ = (('tv_sec', ctypes.c_long),
+ ('tv_nsec', ctypes.c_long))
+
+ if sys.platform.startswith('linux'):
+ CLOCK_MONOTONIC = 1
+ elif sys.platform.startswith('freebsd'):
+ CLOCK_MONOTONIC = 4
+ elif sys.platform.startswith('sunos5'):
+ CLOCK_MONOTONIC = 4
+ elif 'bsd' in sys.platform:
+ CLOCK_MONOTONIC = 3
+ elif sys.platform.startswith('aix'):
+ CLOCK_MONOTONIC = ctypes.c_longlong(10)
+
+ def monotonic():
+ """Monotonic clock, cannot go backward."""
+ ts = timespec()
+ if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
+ errno = ctypes.get_errno()
+ raise OSError(errno, os.strerror(errno))
+ return ts.tv_sec + ts.tv_nsec / 1.0e9
+
+ # Perform a sanity-check.
+ if monotonic() - monotonic() > 0:
+ raise ValueError('monotonic() is not monotonic!')
+
+ except Exception as e:
+ raise RuntimeError('no suitable implementation for this system: ' + repr(e))
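+
+
+# Example usage (an illustrative sketch, not part of the upstream module):
+# measure a short sleep with whichever monotonic clock was selected above.
+if __name__ == '__main__':
+    start = monotonic()
+    time.sleep(0.1)
+    print('elapsed: %.3f seconds' % (monotonic() - start))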
diff --git a/collectors/python.d.plugin/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
new file mode 100644
index 00000000..589401b8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2009 Raymond Hettinger
+#
+# SPDX-License-Identifier: MIT
+
+from UserDict import DictMixin
+
+
+class OrderedDict(dict, DictMixin):
+
+ def __init__(self, *args, **kwds):
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__end
+ except AttributeError:
+ self.clear()
+ self.update(*args, **kwds)
+
+ def clear(self):
+ self.__end = end = []
+ end += [None, end, end] # sentinel node for doubly linked list
+ self.__map = {} # key --> [key, prev, next]
+ dict.clear(self)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ end = self.__end
+ curr = end[1]
+ curr[2] = end[1] = self.__map[key] = [key, curr, end]
+ dict.__setitem__(self, key, value)
+
+ def __delitem__(self, key):
+ dict.__delitem__(self, key)
+ key, prev, next = self.__map.pop(key)
+ prev[2] = next
+ next[1] = prev
+
+ def __iter__(self):
+ end = self.__end
+ curr = end[2]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[2]
+
+ def __reversed__(self):
+ end = self.__end
+ curr = end[1]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[1]
+
+ def popitem(self, last=True):
+ if not self:
+ raise KeyError('dictionary is empty')
+ if last:
+ key = reversed(self).next()
+ else:
+ key = iter(self).next()
+ value = self.pop(key)
+ return key, value
+
+ def __reduce__(self):
+ items = [[k, self[k]] for k in self]
+ tmp = self.__map, self.__end
+ del self.__map, self.__end
+ inst_dict = vars(self).copy()
+ self.__map, self.__end = tmp
+ if inst_dict:
+ return self.__class__, (items,), inst_dict
+ return self.__class__, (items,)
+
+ def keys(self):
+ return list(self)
+
+ setdefault = DictMixin.setdefault
+ update = DictMixin.update
+ pop = DictMixin.pop
+ values = DictMixin.values
+ items = DictMixin.items
+ iterkeys = DictMixin.iterkeys
+ itervalues = DictMixin.itervalues
+ iteritems = DictMixin.iteritems
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+
+ def copy(self):
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ if isinstance(other, OrderedDict):
+ if len(self) != len(other):
+ return False
+ for p, q in zip(self.items(), other.items()):
+ if p != q:
+ return False
+ return True
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
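+
+
+# Example usage (an illustrative sketch, not part of the original recipe).
+# This backport only runs on Python 2 interpreters that lack
+# collections.OrderedDict, since it relies on UserDict.DictMixin.
+if __name__ == '__main__':
+    d = OrderedDict([('first', 1), ('second', 2)])
+    d['third'] = 3
+    print(d.keys())     # keys come back in insertion order
+    print(d.popitem())  # ('third', 3) -- the most recently inserted pair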
diff --git a/collectors/python.d.plugin/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
new file mode 100644
index 00000000..3add8481
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: MIT
+"""
+urllib3 - Thread-safe connection pooling and re-using.
+"""
+
+from __future__ import absolute_import
+import warnings
+
+from .connectionpool import (
+ HTTPConnectionPool,
+ HTTPSConnectionPool,
+ connection_from_url
+)
+
+from . import exceptions
+from .filepost import encode_multipart_formdata
+from .poolmanager import PoolManager, ProxyManager, proxy_from_url
+from .response import HTTPResponse
+from .util.request import make_headers
+from .util.url import get_host
+from .util.timeout import Timeout
+from .util.retry import Retry
+
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+try: # Python 2.7+
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
+__license__ = 'MIT'
+__version__ = '1.21.1'
+
+__all__ = (
+ 'HTTPConnectionPool',
+ 'HTTPSConnectionPool',
+ 'PoolManager',
+ 'ProxyManager',
+ 'HTTPResponse',
+ 'Retry',
+ 'Timeout',
+ 'add_stderr_logger',
+ 'connection_from_url',
+ 'disable_warnings',
+ 'encode_multipart_formdata',
+ 'get_host',
+ 'make_headers',
+ 'proxy_from_url',
+)
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+
+def add_stderr_logger(level=logging.DEBUG):
+ """
+ Helper for quickly adding a StreamHandler to the logger. Useful for
+ debugging.
+
+ Returns the handler after adding it.
+ """
+ # This method needs to be in this __init__.py to get the __name__ correct
+ # even if urllib3 is vendored within another package.
+ logger = logging.getLogger(__name__)
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
+ logger.addHandler(handler)
+ logger.setLevel(level)
+ logger.debug('Added a stderr logging handler to logger: %s', __name__)
+ return handler
+
+
+# ... Clean up.
+del NullHandler
+
+
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
+# SecurityWarning's always go off by default.
+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarning's should go off once per host
+warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
+# InsecurePlatformWarning's don't vary between requests, so we keep it default.
+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
+ append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
+
+
+def disable_warnings(category=exceptions.HTTPWarning):
+ """
+ Helper for quickly disabling all urllib3 warnings.
+ """
+ warnings.simplefilter('ignore', category)
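+
+
+# Example usage (an illustrative sketch, not part of upstream urllib3): turn on
+# stderr debug logging, silence warnings, and fetch a URL through a pool
+# manager.  The URL is a placeholder and the request needs network access.
+if __name__ == '__main__':
+    add_stderr_logger(logging.DEBUG)
+    disable_warnings()  # defaults to silencing every exceptions.HTTPWarning
+    http = PoolManager()
+    response = http.request('GET', 'http://example.com/')
+    print(response.status)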
diff --git a/collectors/python.d.plugin/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
new file mode 100644
index 00000000..2a6b3ec7
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
@@ -0,0 +1,320 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+
+try:
+ from collections import Mapping, MutableMapping
+except ImportError:
+ from collections.abc import Mapping, MutableMapping
+
+try:
+ from threading import RLock
+except ImportError: # Platform-specific: No threads available
+ class RLock:
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+
+try: # Python 2.7+
+ from collections import OrderedDict
+except ImportError:
+ from .packages.ordered_dict import OrderedDict
+from .packages.six import iterkeys, itervalues, PY3
+
+
+__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
+
+
+_Null = object()
+
+
+class RecentlyUsedContainer(MutableMapping):
+ """
+ Provides a thread-safe dict-like container which maintains up to
+ ``maxsize`` keys while throwing away the least-recently-used keys beyond
+ ``maxsize``.
+
+ :param maxsize:
+ Maximum number of recent elements to retain.
+
+ :param dispose_func:
+ Every time an item is evicted from the container,
+        ``dispose_func(value)`` is called on the evicted value.
+ """
+
+ ContainerCls = OrderedDict
+
+ def __init__(self, maxsize=10, dispose_func=None):
+ self._maxsize = maxsize
+ self.dispose_func = dispose_func
+
+ self._container = self.ContainerCls()
+ self.lock = RLock()
+
+ def __getitem__(self, key):
+ # Re-insert the item, moving it to the end of the eviction line.
+ with self.lock:
+ item = self._container.pop(key)
+ self._container[key] = item
+ return item
+
+ def __setitem__(self, key, value):
+ evicted_value = _Null
+ with self.lock:
+ # Possibly evict the existing value of 'key'
+ evicted_value = self._container.get(key, _Null)
+ self._container[key] = value
+
+ # If we didn't evict an existing value, we might have to evict the
+ # least recently used item from the beginning of the container.
+ if len(self._container) > self._maxsize:
+ _key, evicted_value = self._container.popitem(last=False)
+
+ if self.dispose_func and evicted_value is not _Null:
+ self.dispose_func(evicted_value)
+
+ def __delitem__(self, key):
+ with self.lock:
+ value = self._container.pop(key)
+
+ if self.dispose_func:
+ self.dispose_func(value)
+
+ def __len__(self):
+ with self.lock:
+ return len(self._container)
+
+ def __iter__(self):
+ raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
+
+ def clear(self):
+ with self.lock:
+ # Copy pointers to all values, then wipe the mapping
+ values = list(itervalues(self._container))
+ self._container.clear()
+
+ if self.dispose_func:
+ for value in values:
+ self.dispose_func(value)
+
+ def keys(self):
+ with self.lock:
+ return list(iterkeys(self._container))
+
+
+class HTTPHeaderDict(MutableMapping):
+ """
+ :param headers:
+ An iterable of field-value pairs. Must not contain multiple field names
+ when compared case-insensitively.
+
+ :param kwargs:
+ Additional field-value pairs to pass in to ``dict.update``.
+
+ A ``dict`` like container for storing HTTP Headers.
+
+ Field names are stored and compared case-insensitively in compliance with
+ RFC 7230. Iteration provides the first case-sensitive key seen for each
+ case-insensitive pair.
+
+ Using ``__setitem__`` syntax overwrites fields that compare equal
+ case-insensitively in order to maintain ``dict``'s api. For fields that
+ compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
+ in a loop.
+
+ If multiple fields that are equal case-insensitively are passed to the
+ constructor or ``.update``, the behavior is undefined and some will be
+ lost.
+
+ >>> headers = HTTPHeaderDict()
+ >>> headers.add('Set-Cookie', 'foo=bar')
+ >>> headers.add('set-cookie', 'baz=quxx')
+ >>> headers['content-length'] = '7'
+ >>> headers['SET-cookie']
+ 'foo=bar, baz=quxx'
+ >>> headers['Content-Length']
+ '7'
+ """
+
+ def __init__(self, headers=None, **kwargs):
+ super(HTTPHeaderDict, self).__init__()
+ self._container = OrderedDict()
+ if headers is not None:
+ if isinstance(headers, HTTPHeaderDict):
+ self._copy_from(headers)
+ else:
+ self.extend(headers)
+ if kwargs:
+ self.extend(kwargs)
+
+ def __setitem__(self, key, val):
+ self._container[key.lower()] = [key, val]
+ return self._container[key.lower()]
+
+ def __getitem__(self, key):
+ val = self._container[key.lower()]
+ return ', '.join(val[1:])
+
+ def __delitem__(self, key):
+ del self._container[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self._container
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
+ return False
+ if not isinstance(other, type(self)):
+ other = type(self)(other)
+ return (dict((k.lower(), v) for k, v in self.itermerged()) ==
+ dict((k.lower(), v) for k, v in other.itermerged()))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if not PY3: # Python 2
+ iterkeys = MutableMapping.iterkeys
+ itervalues = MutableMapping.itervalues
+
+ __marker = object()
+
+ def __len__(self):
+ return len(self._container)
+
+ def __iter__(self):
+ # Only provide the originally cased names
+ for vals in self._container.values():
+ yield vals[0]
+
+ def pop(self, key, default=__marker):
+ '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ '''
+ # Using the MutableMapping function directly fails due to the private marker.
+ # Using ordinary dict.pop would expose the internal structures.
+ # So let's reinvent the wheel.
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def discard(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def add(self, key, val):
+ """Adds a (name, value) pair, doesn't overwrite the value if it already
+ exists.
+
+ >>> headers = HTTPHeaderDict(foo='bar')
+ >>> headers.add('Foo', 'baz')
+ >>> headers['foo']
+ 'bar, baz'
+ """
+ key_lower = key.lower()
+ new_vals = [key, val]
+ # Keep the common case aka no item present as fast as possible
+ vals = self._container.setdefault(key_lower, new_vals)
+ if new_vals is not vals:
+ vals.append(val)
+
+ def extend(self, *args, **kwargs):
+ """Generic import function for any type of header-like object.
+ Adapted version of MutableMapping.update in order to insert items
+ with self.add instead of self.__setitem__
+ """
+ if len(args) > 1:
+ raise TypeError("extend() takes at most 1 positional "
+ "arguments ({0} given)".format(len(args)))
+ other = args[0] if len(args) >= 1 else ()
+
+ if isinstance(other, HTTPHeaderDict):
+ for key, val in other.iteritems():
+ self.add(key, val)
+ elif isinstance(other, Mapping):
+ for key in other:
+ self.add(key, other[key])
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self.add(key, other[key])
+ else:
+ for key, value in other:
+ self.add(key, value)
+
+ for key, value in kwargs.items():
+ self.add(key, value)
+
+ def getlist(self, key):
+ """Returns a list of all the values for the named field. Returns an
+ empty list if the key doesn't exist."""
+ try:
+ vals = self._container[key.lower()]
+ except KeyError:
+ return []
+ else:
+ return vals[1:]
+
+ # Backwards compatibility for httplib
+ getheaders = getlist
+ getallmatchingheaders = getlist
+ iget = getlist
+
+ def __repr__(self):
+ return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+ def _copy_from(self, other):
+ for key in other:
+ val = other.getlist(key)
+ if isinstance(val, list):
+ # Don't need to convert tuples
+ val = list(val)
+ self._container[key.lower()] = [key] + val
+
+ def copy(self):
+ clone = type(self)()
+ clone._copy_from(self)
+ return clone
+
+ def iteritems(self):
+ """Iterate over all header lines, including duplicate ones."""
+ for key in self:
+ vals = self._container[key.lower()]
+ for val in vals[1:]:
+ yield vals[0], val
+
+ def itermerged(self):
+ """Iterate over all headers, merging duplicate ones together."""
+ for key in self:
+ val = self._container[key.lower()]
+ yield val[0], ', '.join(val[1:])
+
+ def items(self):
+ return list(self.iteritems())
+
+ @classmethod
+ def from_httplib(cls, message): # Python 2
+ """Read headers from a Python 2 httplib message object."""
+ # python2.7 does not expose a proper API for exporting multiheaders
+ # efficiently. This function re-reads raw lines from the message
+ # object and extracts the multiheaders properly.
+ headers = []
+
+ for line in message.headers:
+ if line.startswith((' ', '\t')):
+ key, value = headers[-1]
+ headers[-1] = (key, value + '\r\n' + line.rstrip())
+ continue
+
+ key, value = line.split(':', 1)
+ headers.append((key, value.strip()))
+
+ return cls(headers)
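+
+
+# Example usage (an illustrative sketch, not part of upstream urllib3): the LRU
+# container calls dispose_func on whatever it evicts once maxsize is exceeded,
+# and the header dict keeps repeated fields while comparing names
+# case-insensitively.
+if __name__ == '__main__':
+    evicted = []
+    cache = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
+    cache['a'] = 1
+    cache['b'] = 2
+    cache['c'] = 3          # evicts 'a', the least recently used entry
+    print(cache.keys())     # ['b', 'c']
+    print(evicted)          # [1]
+
+    headers = HTTPHeaderDict()
+    headers.add('Set-Cookie', 'foo=bar')
+    headers.add('set-cookie', 'baz=qux')
+    print(headers['SET-COOKIE'])  # 'foo=bar, baz=qux'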
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py
new file mode 100644
index 00000000..f757493c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/connection.py
@@ -0,0 +1,374 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import datetime
+import logging
+import os
+import sys
+import socket
+from socket import error as SocketError, timeout as SocketTimeout
+import warnings
+from .packages import six
+from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
+from .packages.six.moves.http_client import HTTPException # noqa: F401
+
+try: # Compiled with SSL?
+ import ssl
+ BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError): # Platform-specific: No SSL.
+ ssl = None
+
+ class BaseSSLError(BaseException):
+ pass
+
+
+try: # Python 3:
+ # Not a no-op, we're adding this to the namespace so it can be imported.
+ ConnectionError = ConnectionError
+except NameError: # Python 2:
+ class ConnectionError(Exception):
+ pass
+
+
+from .exceptions import (
+ NewConnectionError,
+ ConnectTimeoutError,
+ SubjectAltNameWarning,
+ SystemTimeWarning,
+)
+from .packages.ssl_match_hostname import match_hostname, CertificateError
+
+from .util.ssl_ import (
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ assert_fingerprint,
+ create_urllib3_context,
+ ssl_wrap_socket
+)
+
+
+from .util import connection
+
+from ._collections import HTTPHeaderDict
+
+log = logging.getLogger(__name__)
+
+port_by_scheme = {
+ 'http': 80,
+ 'https': 443,
+}
+
+# When updating RECENT_DATE, move it to
+# within two years of the current date, and no
+# earlier than 6 months ago.
+RECENT_DATE = datetime.date(2016, 1, 1)
+
+
+class DummyConnection(object):
+ """Used to detect a failed ConnectionCls import."""
+ pass
+
+
+class HTTPConnection(_HTTPConnection, object):
+ """
+ Based on httplib.HTTPConnection but provides an extra constructor
+ backwards-compatibility layer between older and newer Pythons.
+
+ Additional keyword parameters are used to configure attributes of the connection.
+ Accepted parameters include:
+
+ - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+ - ``source_address``: Set the source address for the current connection.
+
+ .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
+
+ - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+ defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+ Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+ For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+ you might pass::
+
+ HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ]
+
+ Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+ """
+
+ default_port = port_by_scheme['http']
+
+ #: Disable Nagle's algorithm by default.
+ #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+ default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+ #: Whether this connection verifies the host's certificate.
+ is_verified = False
+
+ def __init__(self, *args, **kw):
+ if six.PY3: # Python 3
+ kw.pop('strict', None)
+
+ # Pre-set source_address in case we have an older Python like 2.6.
+ self.source_address = kw.get('source_address')
+
+ if sys.version_info < (2, 7): # Python 2.6
+ # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
+ # not newer versions. We can still use it when creating a
+ # connection though, so we pop it *after* we have saved it as
+ # self.source_address.
+ kw.pop('source_address', None)
+
+ #: The socket options provided by the user. If no options are
+ #: provided, we use the default options.
+ self.socket_options = kw.pop('socket_options', self.default_socket_options)
+
+ # Superclass also sets self.source_address in Python 2.7+.
+ _HTTPConnection.__init__(self, *args, **kw)
+
+ def _new_conn(self):
+ """ Establish a socket connection and set nodelay settings on it.
+
+ :return: New socket connection.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = connection.create_connection(
+ (self.host, self.port), self.timeout, **extra_kw)
+
+ except SocketTimeout as e:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except SocketError as e:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+ def _prepare_conn(self, conn):
+ self.sock = conn
+ # the _tunnel_host attribute was added in python 2.6.3 (via
+ # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
+ # not have them.
+ if getattr(self, '_tunnel_host', None):
+ # TODO: Fix tunnel so it doesn't depend on self.sock state.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ def request_chunked(self, method, url, body=None, headers=None):
+ """
+ Alternative to the common request method, which sends the
+ body with chunked encoding and not as one block
+ """
+ headers = HTTPHeaderDict(headers if headers is not None else {})
+ skip_accept_encoding = 'accept-encoding' in headers
+ skip_host = 'host' in headers
+ self.putrequest(
+ method,
+ url,
+ skip_accept_encoding=skip_accept_encoding,
+ skip_host=skip_host
+ )
+ for header, value in headers.items():
+ self.putheader(header, value)
+ if 'transfer-encoding' not in headers:
+ self.putheader('Transfer-Encoding', 'chunked')
+ self.endheaders()
+
+ if body is not None:
+ stringish_types = six.string_types + (six.binary_type,)
+ if isinstance(body, stringish_types):
+ body = (body,)
+ for chunk in body:
+ if not chunk:
+ continue
+ if not isinstance(chunk, six.binary_type):
+ chunk = chunk.encode('utf8')
+ len_str = hex(len(chunk))[2:]
+ self.send(len_str.encode('utf-8'))
+ self.send(b'\r\n')
+ self.send(chunk)
+ self.send(b'\r\n')
+
+ # After the if clause, to always have a closed body
+ self.send(b'0\r\n\r\n')
+
+
+class HTTPSConnection(HTTPConnection):
+ default_port = port_by_scheme['https']
+
+ ssl_version = None
+
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ ssl_context=None, **kw):
+
+ HTTPConnection.__init__(self, host, port, strict=strict,
+ timeout=timeout, **kw)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.ssl_context = ssl_context
+
+ # Required property for Google AppEngine 1.9.0 which otherwise causes
+ # HTTPS requests to go out as HTTP. (See Issue #356)
+ self._protocol = 'https'
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(None),
+ cert_reqs=resolve_cert_reqs(None),
+ )
+
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ssl_context=self.ssl_context,
+ )
+
+
+class VerifiedHTTPSConnection(HTTPSConnection):
+ """
+ Based on httplib.HTTPSConnection but wraps the socket with
+ SSL certification.
+ """
+ cert_reqs = None
+ ca_certs = None
+ ca_cert_dir = None
+ ssl_version = None
+ assert_fingerprint = None
+
+ def set_cert(self, key_file=None, cert_file=None,
+ cert_reqs=None, ca_certs=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None):
+ """
+ This method should only be called once, before the connection is used.
+ """
+ # If cert_reqs is not provided, we can try to guess. If the user gave
+ # us a cert database, we assume they want to use it: otherwise, if
+ # they gave us an SSL Context object we should use whatever is set for
+ # it.
+ if cert_reqs is None:
+ if ca_certs or ca_cert_dir:
+ cert_reqs = 'CERT_REQUIRED'
+ elif self.ssl_context is not None:
+ cert_reqs = self.ssl_context.verify_mode
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+ self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+
+ def connect(self):
+ # Add certificate verification
+ conn = self._new_conn()
+
+ hostname = self.host
+ if getattr(self, '_tunnel_host', None):
+ # _tunnel_host was added in Python 2.6.3
+ # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
+
+ self.sock = conn
+ # Calls self._set_hostport(), so self.host is
+ # self._tunnel_host below.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ # Override the host with the one we're requesting data from.
+ hostname = self._tunnel_host
+
+ is_time_off = datetime.date.today() < RECENT_DATE
+ if is_time_off:
+ warnings.warn((
+ 'System time is way off (before {0}). This will probably '
+ 'lead to SSL verification errors').format(RECENT_DATE),
+ SystemTimeWarning
+ )
+
+ # Wrap socket using verification with the root certs in
+ # trusted_root_certs
+ if self.ssl_context is None:
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(self.ssl_version),
+ cert_reqs=resolve_cert_reqs(self.cert_reqs),
+ )
+
+ context = self.ssl_context
+ context.verify_mode = resolve_cert_reqs(self.cert_reqs)
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ server_hostname=hostname,
+ ssl_context=context)
+
+ if self.assert_fingerprint:
+ assert_fingerprint(self.sock.getpeercert(binary_form=True),
+ self.assert_fingerprint)
+ elif context.verify_mode != ssl.CERT_NONE \
+ and not getattr(context, 'check_hostname', False) \
+ and self.assert_hostname is not False:
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS Library still thinks it's matching hostnames.
+ cert = self.sock.getpeercert()
+ if not cert.get('subjectAltName', ()):
+ warnings.warn((
+ 'Certificate for {0} has no `subjectAltName`, falling back to check for a '
+ '`commonName` for now. This feature is being removed by major browsers and '
+ 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
+ 'for details.)'.format(hostname)),
+ SubjectAltNameWarning
+ )
+ _match_hostname(cert, self.assert_hostname or hostname)
+
+ self.is_verified = (
+ context.verify_mode == ssl.CERT_REQUIRED or
+ self.assert_fingerprint is not None
+ )
+
+
+def _match_hostname(cert, asserted_hostname):
+ try:
+ match_hostname(cert, asserted_hostname)
+ except CertificateError as e:
+ log.error(
+ 'Certificate did not match expected hostname: %s. '
+ 'Certificate: %s', asserted_hostname, cert
+ )
+ # Add cert to exception and reraise so client code can inspect
+ # the cert when catching the exception, if they want to
+ e._peer_cert = cert
+ raise
+
+
+if ssl:
+ # Make a copy for testing.
+ UnverifiedHTTPSConnection = HTTPSConnection
+ HTTPSConnection = VerifiedHTTPSConnection
+else:
+ HTTPSConnection = DummyConnection
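+
+
+# Example usage (an illustrative sketch, not part of upstream urllib3): build a
+# plain HTTP connection with TCP keep-alive layered on top of the default
+# socket options, mirroring the pattern shown in the HTTPConnection docstring.
+# It assumes an HTTP server is listening on localhost:8080.
+if __name__ == '__main__':
+    keepalive_options = HTTPConnection.default_socket_options + [
+        (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+    ]
+    conn = HTTPConnection('localhost', 8080, socket_options=keepalive_options)
+    conn.request('GET', '/')
+    print(conn.getresponse().status)
+    conn.close()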
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
new file mode 100644
index 00000000..90e4c86a
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
@@ -0,0 +1,900 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import errno
+import logging
+import sys
+import warnings
+
+from socket import error as SocketError, timeout as SocketTimeout
+import socket
+
+
+from .exceptions import (
+ ClosedPoolError,
+ ProtocolError,
+ EmptyPoolError,
+ HeaderParsingError,
+ HostChangedError,
+ LocationValueError,
+ MaxRetryError,
+ ProxyError,
+ ReadTimeoutError,
+ SSLError,
+ TimeoutError,
+ InsecureRequestWarning,
+ NewConnectionError,
+)
+from .packages.ssl_match_hostname import CertificateError
+from .packages import six
+from .packages.six.moves import queue
+from .connection import (
+ port_by_scheme,
+ DummyConnection,
+ HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
+ HTTPException, BaseSSLError,
+)
+from .request import RequestMethods
+from .response import HTTPResponse
+
+from .util.connection import is_connection_dropped
+from .util.request import set_file_position
+from .util.response import assert_header_parsing
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host, Url
+
+
+if six.PY2:
+ # Queue is imported for side effects on MS Windows
+ import Queue as _unused_module_Queue # noqa: F401
+
+xrange = six.moves.xrange
+
+log = logging.getLogger(__name__)
+
+_Default = object()
+
+
+# Pool objects
+class ConnectionPool(object):
+ """
+ Base class for all connection pools, such as
+ :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+ """
+
+ scheme = None
+ QueueCls = queue.LifoQueue
+
+ def __init__(self, host, port=None):
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ self.host = _ipv6_host(host).lower()
+ self.port = port
+
+ def __str__(self):
+ return '%s(host=%r, port=%r)' % (type(self).__name__,
+ self.host, self.port)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ pass
+
+
+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
+
+
+class HTTPConnectionPool(ConnectionPool, RequestMethods):
+ """
+ Thread-safe connection pool for one host.
+
+ :param host:
+ Host used for this HTTP Connection (e.g. "localhost"), passed into
+ :class:`httplib.HTTPConnection`.
+
+ :param port:
+ Port used for this HTTP Connection (None is equivalent to 80), passed
+ into :class:`httplib.HTTPConnection`.
+
+ :param strict:
+ Causes BadStatusLine to be raised if the status line can't be parsed
+ as a valid HTTP/1.0 or 1.1 status line, passed into
+ :class:`httplib.HTTPConnection`.
+
+ .. note::
+ Only works in Python 2. This parameter is ignored in Python 3.
+
+ :param timeout:
+ Socket timeout in seconds for each individual connection. This can
+ be a float or integer, which sets the timeout for the HTTP request,
+ or an instance of :class:`urllib3.util.Timeout` which gives you more
+ fine-grained control over request timeouts. After the constructor has
+ been parsed, this is always a `urllib3.util.Timeout` object.
+
+ :param maxsize:
+ Number of connections to save that can be reused. More than 1 is useful
+ in multithreaded situations. If ``block`` is set to False, more
+ connections will be created but they will not be saved once they've
+ been used.
+
+ :param block:
+ If set to True, no more than ``maxsize`` connections will be used at
+ a time. When no free connections are available, the call will block
+ until a connection has been released. This is a useful side effect for
+ particular multithreaded situations where one does not want to use more
+ than maxsize connections per host to prevent flooding.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param retries:
+ Retry configuration to use by default with requests in this pool.
+
+ :param _proxy:
+        Parsed proxy URL. Should not be used directly; instead, see
+        :class:`urllib3.connectionpool.ProxyManager`.
+
+ :param _proxy_headers:
+        A dictionary with proxy headers. Should not be used directly;
+        instead, see :class:`urllib3.connectionpool.ProxyManager`.
+
+ :param \\**conn_kw:
+ Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+ :class:`urllib3.connection.HTTPSConnection` instances.
+ """
+
+ scheme = 'http'
+ ConnectionCls = HTTPConnection
+ ResponseCls = HTTPResponse
+
+ def __init__(self, host, port=None, strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
+ headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ **conn_kw):
+ ConnectionPool.__init__(self, host, port)
+ RequestMethods.__init__(self, headers)
+
+ self.strict = strict
+
+ if not isinstance(timeout, Timeout):
+ timeout = Timeout.from_float(timeout)
+
+ if retries is None:
+ retries = Retry.DEFAULT
+
+ self.timeout = timeout
+ self.retries = retries
+
+ self.pool = self.QueueCls(maxsize)
+ self.block = block
+
+ self.proxy = _proxy
+ self.proxy_headers = _proxy_headers or {}
+
+ # Fill the queue up so that doing get() on it will block properly
+ for _ in xrange(maxsize):
+ self.pool.put(None)
+
+ # These are mostly for testing and debugging purposes.
+ self.num_connections = 0
+ self.num_requests = 0
+ self.conn_kw = conn_kw
+
+ if self.proxy:
+ # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+ # We cannot know if the user has added default socket options, so we cannot replace the
+ # list.
+ self.conn_kw.setdefault('socket_options', [])
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`HTTPConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTP connection (%d): %s",
+ self.num_connections, self.host)
+
+ conn = self.ConnectionCls(host=self.host, port=self.port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+ return conn
+
+ def _get_conn(self, timeout=None):
+ """
+ Get a connection. Will return a pooled connection if one is available.
+
+ If no connections are available and :prop:`.block` is ``False``, then a
+ fresh connection is returned.
+
+ :param timeout:
+ Seconds to wait before giving up and raising
+ :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+ :prop:`.block` is ``True``.
+ """
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise EmptyPoolError(self,
+ "Pool reached maximum size and no more "
+ "connections are allowed.")
+ pass # Oh well, we'll create a new connection then
+
+ # If this is a persistent connection, check if it got disconnected
+ if conn and is_connection_dropped(conn):
+ log.debug("Resetting dropped connection: %s", self.host)
+ conn.close()
+ if getattr(conn, 'auto_open', 1) == 0:
+ # This is a proxied connection that has been mutated by
+ # httplib._tunnel() and cannot be reused (since it would
+ # attempt to bypass the proxy)
+ conn = None
+
+ return conn or self._new_conn()
+
+ def _put_conn(self, conn):
+ """
+ Put a connection back into the pool.
+
+ :param conn:
+ Connection object for the current host and port as returned by
+ :meth:`._new_conn` or :meth:`._get_conn`.
+
+ If the pool is already full, the connection is closed and discarded
+ because we exceeded maxsize. If connections are discarded frequently,
+ then maxsize should be increased.
+
+ If the pool is closed, then the connection will be closed and discarded.
+ """
+ try:
+ self.pool.put(conn, block=False)
+ return # Everything is dandy, done.
+ except AttributeError:
+ # self.pool is None.
+ pass
+ except queue.Full:
+ # This should never happen if self.block == True
+ log.warning(
+ "Connection pool is full, discarding connection: %s",
+ self.host)
+
+ # Connection never got put back into the pool, close it.
+ if conn:
+ conn.close()
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ pass
+
+ def _prepare_proxy(self, conn):
+ # Nothing to do for HTTP connections.
+ pass
+
+ def _get_timeout(self, timeout):
+ """ Helper that always returns a :class:`urllib3.util.Timeout` """
+ if timeout is _Default:
+ return self.timeout.clone()
+
+ if isinstance(timeout, Timeout):
+ return timeout.clone()
+ else:
+ # User passed us an int/float. This is for backwards compatibility,
+ # can be removed later
+ return Timeout.from_float(timeout)
+
+ def _raise_timeout(self, err, url, timeout_value):
+ """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+ if isinstance(err, SocketTimeout):
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ # See the above comment about EAGAIN in Python 3. In Python 2 we have
+ # to specifically catch it and throw the timeout error
+ if hasattr(err, 'errno') and err.errno in _blocking_errnos:
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ # Catch possible read timeouts thrown as SSL errors. If not the
+ # case, rethrow the original. We need to do this because of:
+ # http://bugs.python.org/issue10272
+ if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
+ raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+ def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
+ **httplib_request_kw):
+ """
+ Perform a request on a given urllib connection object taken from our
+ pool.
+
+ :param conn:
+ a connection from one of our connection pools
+
+ :param timeout:
+ Socket timeout in seconds for the request. This can be a
+ float or integer, which will set the same timeout value for
+ the socket connect and the socket read, or an instance of
+ :class:`urllib3.util.Timeout`, which gives you more fine-grained
+ control over your timeouts.
+ """
+ self.num_requests += 1
+
+ timeout_obj = self._get_timeout(timeout)
+ timeout_obj.start_connect()
+ conn.timeout = timeout_obj.connect_timeout
+
+ # Trigger any extra validation we need to do.
+ try:
+ self._validate_conn(conn)
+ except (SocketTimeout, BaseSSLError) as e:
+ # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+ self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+ raise
+
+ # conn.request() calls httplib.*.request, not the method in
+ # urllib3.request. It also calls makefile (recv) on the socket.
+ if chunked:
+ conn.request_chunked(method, url, **httplib_request_kw)
+ else:
+ conn.request(method, url, **httplib_request_kw)
+
+ # Reset the timeout for the recv() on the socket
+ read_timeout = timeout_obj.read_timeout
+
+ # App Engine doesn't have a sock attr
+ if getattr(conn, 'sock', None):
+ # In Python 3 socket.py will catch EAGAIN and return None when you
+ # try and read into the file pointer created by http.client, which
+ # instead raises a BadStatusLine exception. Instead of catching
+ # the exception and assuming all BadStatusLine exceptions are read
+ # timeouts, check for a zero timeout before making the request.
+ if read_timeout == 0:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % read_timeout)
+ if read_timeout is Timeout.DEFAULT_TIMEOUT:
+ conn.sock.settimeout(socket.getdefaulttimeout())
+ else: # None or a value
+ conn.sock.settimeout(read_timeout)
+
+ # Receive the response from the server
+ try:
+ try: # Python 2.7, use buffering of HTTP responses
+ httplib_response = conn.getresponse(buffering=True)
+ except TypeError: # Python 2.6 and older, Python 3
+ try:
+ httplib_response = conn.getresponse()
+ except Exception as e:
+ # Remove the TypeError from the exception chain in Python 3;
+ # otherwise it looks like a programming error was the cause.
+ six.raise_from(e, None)
+ except (SocketTimeout, BaseSSLError, SocketError) as e:
+ self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+ raise
+
+ # AppEngine doesn't have a version attr.
+ http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
+ log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
+ method, url, http_version, httplib_response.status,
+ httplib_response.length)
+
+ try:
+ assert_header_parsing(httplib_response.msg)
+ except HeaderParsingError as hpe: # Platform-specific: Python 3
+ log.warning(
+ 'Failed to parse headers (url=%s): %s',
+ self._absolute_url(url), hpe, exc_info=True)
+
+ return httplib_response
+
+ def _absolute_url(self, path):
+ return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ # Disable access to the pool
+ old_pool, self.pool = self.pool, None
+
+ try:
+ while True:
+ conn = old_pool.get(block=False)
+ if conn:
+ conn.close()
+
+ except queue.Empty:
+ pass # Done.
+
+ def is_same_host(self, url):
+ """
+ Check if the given ``url`` is a member of the same host as this
+ connection pool.
+ """
+ if url.startswith('/'):
+ return True
+
+ # TODO: Add optional support for socket.gethostbyname checking.
+ scheme, host, port = get_host(url)
+
+ host = _ipv6_host(host).lower()
+
+ # Use explicit default port for comparison when none is given
+ if self.port and not port:
+ port = port_by_scheme.get(scheme)
+ elif not self.port and port == port_by_scheme.get(scheme):
+ port = None
+
+ return (scheme, host, port) == (self.scheme, self.host, self.port)
+
+ def urlopen(self, method, url, body=None, headers=None, retries=None,
+ redirect=True, assert_same_host=True, timeout=_Default,
+ pool_timeout=None, release_conn=None, chunked=False,
+ body_pos=None, **response_kw):
+ """
+ Get a connection from the pool and perform an HTTP request. This is the
+ lowest level call for making a request, so you'll need to specify all
+ the raw details.
+
+ .. note::
+
+ More commonly, it's appropriate to use a convenience method provided
+ by :class:`.RequestMethods`, such as :meth:`request`.
+
+ .. note::
+
+ `release_conn` will only behave as expected if
+ `preload_content=False` because we want to make
+ `preload_content=False` the default behaviour someday soon without
+ breaking backwards compatibility.
+
+ :param method:
+ HTTP request method (such as GET, POST, PUT, etc.)
+
+ :param body:
+ Data to send in the request body (useful for creating
+ POST requests, see HTTPConnectionPool.post_url for
+ more convenience).
+
+ :param headers:
+ Dictionary of custom headers to send, such as User-Agent,
+ If-None-Match, etc. If None, pool headers are used. If provided,
+ these headers completely replace any pool-specific headers.
+
+ :param retries:
+ Configure the number of retries to allow before raising a
+ :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+ Pass ``None`` to retry until you receive a response. Pass a
+ :class:`~urllib3.util.retry.Retry` object for fine-grained control
+ over different types of retries.
+ Pass an integer number to retry connection errors that many times,
+ but no other types of errors. Pass zero to never retry.
+
+ If ``False``, then retries are disabled and any exception is raised
+ immediately. Also, instead of raising a MaxRetryError on redirects,
+ the redirect response will be returned.
+
+ :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+ :param redirect:
+ If True, automatically handle redirects (status codes 301, 302,
+ 303, 307, 308). Each redirect counts as a retry. Disabling retries
+ will disable redirect, too.
+
+ :param assert_same_host:
+ If ``True``, will make sure that the host of the pool requests is
+ consistent with the pool's host; otherwise a HostChangedError is
+ raised. When ``False``, you can use the pool on an HTTP proxy and
+ request foreign hosts.
+
+ :param timeout:
+ If specified, overrides the default timeout for this one
+ request. It may be a float (in seconds) or an instance of
+ :class:`urllib3.util.Timeout`.
+
+ :param pool_timeout:
+ If set and the pool is set to block=True, then this method will
+ block for ``pool_timeout`` seconds and raise EmptyPoolError if no
+ connection is available within the time period.
+
+ :param release_conn:
+ If False, then the urlopen call will not release the connection
+ back into the pool once a response is received (but will release if
+ you read the entire contents of the response such as when
+ `preload_content=True`). This is useful if you're not preloading
+ the response's content immediately. You will need to call
+ ``r.release_conn()`` on the response ``r`` to return the connection
+ back into the pool. If None, it takes the value of
+ ``response_kw.get('preload_content', True)``.
+
+ :param chunked:
+ If True, urllib3 will send the body using chunked transfer
+ encoding. Otherwise, urllib3 will send the body using the standard
+ content-length form. Defaults to False.
+
+ :param int body_pos:
+ Position to seek to in file-like body in the event of a retry or
+ redirect. Typically this won't need to be set because urllib3 will
+ auto-populate the value when needed.
+
+ :param \\**response_kw:
+ Additional parameters are passed to
+ :meth:`urllib3.response.HTTPResponse.from_httplib`
+ """
+ if headers is None:
+ headers = self.headers
+
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if release_conn is None:
+ release_conn = response_kw.get('preload_content', True)
+
+ # Check host
+ if assert_same_host and not self.is_same_host(url):
+ raise HostChangedError(self, url, retries)
+
+ conn = None
+
+ # Track whether `conn` needs to be released before
+ # returning/raising/recursing. Update this variable if necessary, and
+ # leave `release_conn` constant throughout the function. That way, if
+ # the function recurses, the original value of `release_conn` will be
+ # passed down into the recursive call, and its value will be respected.
+ #
+ # See issue #651 [1] for details.
+ #
+ # [1] <https://github.com/shazow/urllib3/issues/651>
+ release_this_conn = release_conn
+
+ # Merge the proxy headers. Only do this in HTTP. We have to copy the
+ # headers dict so we can safely change it without those changes being
+ # reflected in anyone else's copy.
+ if self.scheme == 'http':
+ headers = headers.copy()
+ headers.update(self.proxy_headers)
+
+ # Must keep the exception bound to a separate variable or else Python 3
+ # complains about UnboundLocalError.
+ err = None
+
+ # Keep track of whether we cleanly exited the except block. This
+ # ensures we do proper cleanup in finally.
+ clean_exit = False
+
+ # Rewind body position, if needed. Record current position
+ # for future rewinds in the event of a redirect/retry.
+ body_pos = set_file_position(body, body_pos)
+
+ try:
+ # Request a connection from the queue.
+ timeout_obj = self._get_timeout(timeout)
+ conn = self._get_conn(timeout=pool_timeout)
+
+ conn.timeout = timeout_obj.connect_timeout
+
+ is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
+ if is_new_proxy_conn:
+ self._prepare_proxy(conn)
+
+ # Make the request on the httplib connection object.
+ httplib_response = self._make_request(conn, method, url,
+ timeout=timeout_obj,
+ body=body, headers=headers,
+ chunked=chunked)
+
+ # If we're going to release the connection in ``finally:``, then
+ # the response doesn't need to know about the connection. Otherwise
+ # it will also try to release it and we'll have a double-release
+ # mess.
+ response_conn = conn if not release_conn else None
+
+ # Pass method to Response for length checking
+ response_kw['request_method'] = method
+
+ # Import httplib's response into our own wrapper object
+ response = self.ResponseCls.from_httplib(httplib_response,
+ pool=self,
+ connection=response_conn,
+ retries=retries,
+ **response_kw)
+
+ # Everything went great!
+ clean_exit = True
+
+ except queue.Empty:
+ # Timed out by queue.
+ raise EmptyPoolError(self, "No pool connections are available.")
+
+ except (BaseSSLError, CertificateError) as e:
+ # Close the connection. If a connection is reused on which there
+ # was a Certificate error, the next request will certainly raise
+ # another Certificate error.
+ clean_exit = False
+ raise SSLError(e)
+
+ except SSLError:
+ # Treat SSLError separately from BaseSSLError to preserve
+ # traceback.
+ clean_exit = False
+ raise
+
+ except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
+ # Discard the connection for these exceptions. It will be
+ # replaced during the next _get_conn() call.
+ clean_exit = False
+
+ if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+ e = ProxyError('Cannot connect to proxy.', e)
+ elif isinstance(e, (SocketError, HTTPException)):
+ e = ProtocolError('Connection aborted.', e)
+
+ retries = retries.increment(method, url, error=e, _pool=self,
+ _stacktrace=sys.exc_info()[2])
+ retries.sleep()
+
+ # Keep track of the error for the retry warning.
+ err = e
+
+ finally:
+ if not clean_exit:
+ # We hit some kind of exception, handled or otherwise. We need
+ # to throw the connection away unless explicitly told not to.
+ # Close the connection, set the variable to None, and make sure
+ # we put the None back in the pool to avoid leaking it.
+ conn = conn and conn.close()
+ release_this_conn = True
+
+ if release_this_conn:
+ # Put the connection back to be reused. If the connection is
+ # expired then it will be None, which will get replaced with a
+ # fresh connection during _get_conn.
+ self._put_conn(conn)
+
+ if not conn:
+ # Try again
+ log.warning("Retrying (%r) after connection "
+ "broken by '%r': %s", retries, err, url)
+ return self.urlopen(method, url, body, headers, retries,
+ redirect, assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and response.get_redirect_location()
+ if redirect_location:
+ if response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+
+ retries.sleep_for_retry(response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(
+ method, redirect_location, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn, body_pos=body_pos,
+ **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(response.getheader('Retry-After'))
+ if retries.is_retry(method, response.status, has_retry_after):
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_status:
+ # Release the connection for this response, since we're not
+ # returning it to be released manually.
+ response.release_conn()
+ raise
+ return response
+ retries.sleep(response)
+ log.debug("Retry: %s", url)
+ return self.urlopen(
+ method, url, body, headers,
+ retries=retries, redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout, pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ body_pos=body_pos, **response_kw)
+
+ return response
+
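+# A minimal usage sketch of calling urlopen() directly, assuming a reachable
+# host named 'example.com' and illustrative retry/timeout values:
+#
+#     from urllib3 import HTTPConnectionPool, Retry, Timeout
+#
+#     pool = HTTPConnectionPool('example.com', maxsize=2)
+#     resp = pool.urlopen('GET', '/', retries=Retry(total=3, redirect=2),
+#                         timeout=Timeout(connect=2.0, read=5.0))
+#     print(resp.status)
+#     pool.close()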
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+ """
+ Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+ When Python is compiled with the :mod:`ssl` module, then
+ :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
+ instead of :class:`.HTTPSConnection`.
+
+ :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
+ ``assert_hostname`` and ``host`` in this order to verify connections.
+ If ``assert_hostname`` is False, no verification is done.
+
+ The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+ ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
+ available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
+ the connection socket into an SSL socket.
+ """
+
+ scheme = 'https'
+ ConnectionCls = HTTPSConnection
+
+ def __init__(self, host, port=None,
+ strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
+ block=False, headers=None, retries=None,
+ _proxy=None, _proxy_headers=None,
+ key_file=None, cert_file=None, cert_reqs=None,
+ ca_certs=None, ssl_version=None,
+ assert_hostname=None, assert_fingerprint=None,
+ ca_cert_dir=None, **conn_kw):
+
+ HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
+ block, headers, retries, _proxy, _proxy_headers,
+ **conn_kw)
+
+ if ca_certs and cert_reqs is None:
+ cert_reqs = 'CERT_REQUIRED'
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ def _prepare_conn(self, conn):
+ """
+ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+ and establish the tunnel if proxy is used.
+ """
+
+ if isinstance(conn, VerifiedHTTPSConnection):
+ conn.set_cert(key_file=self.key_file,
+ cert_file=self.cert_file,
+ cert_reqs=self.cert_reqs,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint)
+ conn.ssl_version = self.ssl_version
+ return conn
+
+ def _prepare_proxy(self, conn):
+ """
+ Establish tunnel connection early, because otherwise httplib
+ would improperly set Host: header to proxy's IP:port.
+ """
+ # Python 2.7+
+ try:
+ set_tunnel = conn.set_tunnel
+ except AttributeError: # Platform-specific: Python 2.6
+ set_tunnel = conn._set_tunnel
+
+ if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
+ set_tunnel(self.host, self.port)
+ else:
+ set_tunnel(self.host, self.port, self.proxy_headers)
+
+ conn.connect()
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`httplib.HTTPSConnection`.
+ """
+ self.num_connections += 1
+ log.debug("Starting new HTTPS connection (%d): %s",
+ self.num_connections, self.host)
+
+ if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+ raise SSLError("Can't connect to HTTPS URL because the SSL "
+ "module is not available.")
+
+ actual_host = self.host
+ actual_port = self.port
+ if self.proxy is not None:
+ actual_host = self.proxy.host
+ actual_port = self.proxy.port
+
+ conn = self.ConnectionCls(host=actual_host, port=actual_port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict, **self.conn_kw)
+
+ return self._prepare_conn(conn)
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+ # Force connect early to allow us to validate the connection.
+ if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
+ conn.connect()
+
+ if not conn.is_verified:
+ warnings.warn((
+ 'Unverified HTTPS request is being made. '
+ 'Adding certificate verification is strongly advised. See: '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings'),
+ InsecureRequestWarning)
+
+
+def connection_from_url(url, **kw):
+ """
+ Given a url, return an :class:`.ConnectionPool` instance of its host.
+
+ This is a shortcut for not having to parse out the scheme, host, and port
+ of the url before creating an :class:`.ConnectionPool` instance.
+
+ :param url:
+ Absolute URL string that must include the scheme. Port is optional.
+
+ :param \\**kw:
+ Passes additional parameters to the constructor of the appropriate
+ :class:`.ConnectionPool`. Useful for specifying things like
+ timeout, maxsize, headers, etc.
+
+ Example::
+
+ >>> conn = connection_from_url('http://google.com/')
+ >>> r = conn.request('GET', '/')
+ """
+ scheme, host, port = get_host(url)
+ port = port or port_by_scheme.get(scheme, 80)
+ if scheme == 'https':
+ return HTTPSConnectionPool(host, port=port, **kw)
+ else:
+ return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _ipv6_host(host):
+ """
+ Process IPv6 address literals
+ """
+
+ # httplib doesn't like it when we include brackets in IPv6 addresses
+ # Specifically, if we include brackets but also pass the port then
+ # httplib crazily doubles up the square brackets on the Host header.
+ # Instead, we need to make sure we never pass ``None`` as the port.
+ # However, for backward compatibility reasons we can't actually
+ # *assert* that. See http://bugs.python.org/issue28539
+ #
+ # Also if an IPv6 address literal has a zone identifier, the
+ # percent sign might be URI-encoded; convert it back into ASCII.
+ if host.startswith('[') and host.endswith(']'):
+ host = host.replace('%25', '%').strip('[]')
+ return host
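+# A small worked example of the normalisation above, assuming a link-local
+# IPv6 literal with a URI-encoded zone identifier:
+#
+#     _ipv6_host('[fe80::1%25eth0]')   # -> 'fe80::1%eth0'
+#     _ipv6_host('example.com')        # -> 'example.com' (unchanged)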
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 00000000..bb826673
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,591 @@
+# SPDX-License-Identifier: MIT
+"""
+This module uses ctypes to bind a whole bunch of functions and constants from
+SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants, and
+they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+ Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import platform
+from ctypes.util import find_library
+from ctypes import (
+ c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
+ c_bool
+)
+from ctypes import CDLL, POINTER, CFUNCTYPE
+
+
+security_path = find_library('Security')
+if not security_path:
+ raise ImportError('The library Security could not be found')
+
+
+core_foundation_path = find_library('CoreFoundation')
+if not core_foundation_path:
+ raise ImportError('The library CoreFoundation could not be found')
+
+
+version = platform.mac_ver()[0]
+version_info = tuple(map(int, version.split('.')))
+if version_info < (10, 8):
+ raise OSError(
+ 'Only OS X 10.8 and newer are supported, not %s.%s' % (
+ version_info[0], version_info[1]
+ )
+ )
+
+Security = CDLL(security_path, use_errno=True)
+CoreFoundation = CDLL(core_foundation_path, use_errno=True)
+
+Boolean = c_bool
+CFIndex = c_long
+CFStringEncoding = c_uint32
+CFData = c_void_p
+CFString = c_void_p
+CFArray = c_void_p
+CFMutableArray = c_void_p
+CFDictionary = c_void_p
+CFError = c_void_p
+CFType = c_void_p
+CFTypeID = c_ulong
+
+CFTypeRef = POINTER(CFType)
+CFAllocatorRef = c_void_p
+
+OSStatus = c_int32
+
+CFDataRef = POINTER(CFData)
+CFStringRef = POINTER(CFString)
+CFArrayRef = POINTER(CFArray)
+CFMutableArrayRef = POINTER(CFMutableArray)
+CFDictionaryRef = POINTER(CFDictionary)
+CFArrayCallBacks = c_void_p
+CFDictionaryKeyCallBacks = c_void_p
+CFDictionaryValueCallBacks = c_void_p
+
+SecCertificateRef = POINTER(c_void_p)
+SecExternalFormat = c_uint32
+SecExternalItemType = c_uint32
+SecIdentityRef = POINTER(c_void_p)
+SecItemImportExportFlags = c_uint32
+SecItemImportExportKeyParameters = c_void_p
+SecKeychainRef = POINTER(c_void_p)
+SSLProtocol = c_uint32
+SSLCipherSuite = c_uint32
+SSLContextRef = POINTER(c_void_p)
+SecTrustRef = POINTER(c_void_p)
+SSLConnectionRef = c_uint32
+SecTrustResultType = c_uint32
+SecTrustOptionFlags = c_uint32
+SSLProtocolSide = c_uint32
+SSLConnectionType = c_uint32
+SSLSessionOption = c_uint32
+
+
+try:
+ Security.SecItemImport.argtypes = [
+ CFDataRef,
+ CFStringRef,
+ POINTER(SecExternalFormat),
+ POINTER(SecExternalItemType),
+ SecItemImportExportFlags,
+ POINTER(SecItemImportExportKeyParameters),
+ SecKeychainRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecItemImport.restype = OSStatus
+
+ Security.SecCertificateGetTypeID.argtypes = []
+ Security.SecCertificateGetTypeID.restype = CFTypeID
+
+ Security.SecIdentityGetTypeID.argtypes = []
+ Security.SecIdentityGetTypeID.restype = CFTypeID
+
+ Security.SecKeyGetTypeID.argtypes = []
+ Security.SecKeyGetTypeID.restype = CFTypeID
+
+ Security.SecCertificateCreateWithData.argtypes = [
+ CFAllocatorRef,
+ CFDataRef
+ ]
+ Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+ Security.SecCertificateCopyData.argtypes = [
+ SecCertificateRef
+ ]
+ Security.SecCertificateCopyData.restype = CFDataRef
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SecIdentityCreateWithCertificate.argtypes = [
+ CFTypeRef,
+ SecCertificateRef,
+ POINTER(SecIdentityRef)
+ ]
+ Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+ Security.SecKeychainCreate.argtypes = [
+ c_char_p,
+ c_uint32,
+ c_void_p,
+ Boolean,
+ c_void_p,
+ POINTER(SecKeychainRef)
+ ]
+ Security.SecKeychainCreate.restype = OSStatus
+
+ Security.SecKeychainDelete.argtypes = [
+ SecKeychainRef
+ ]
+ Security.SecKeychainDelete.restype = OSStatus
+
+ Security.SecPKCS12Import.argtypes = [
+ CFDataRef,
+ CFDictionaryRef,
+ POINTER(CFArrayRef)
+ ]
+ Security.SecPKCS12Import.restype = OSStatus
+
+ SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
+ SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
+
+ Security.SSLSetIOFuncs.argtypes = [
+ SSLContextRef,
+ SSLReadFunc,
+ SSLWriteFunc
+ ]
+ Security.SSLSetIOFuncs.restype = OSStatus
+
+ Security.SSLSetPeerID.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerID.restype = OSStatus
+
+ Security.SSLSetCertificate.argtypes = [
+ SSLContextRef,
+ CFArrayRef
+ ]
+ Security.SSLSetCertificate.restype = OSStatus
+
+ Security.SSLSetCertificateAuthorities.argtypes = [
+ SSLContextRef,
+ CFTypeRef,
+ Boolean
+ ]
+ Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+ Security.SSLSetConnection.argtypes = [
+ SSLContextRef,
+ SSLConnectionRef
+ ]
+ Security.SSLSetConnection.restype = OSStatus
+
+ Security.SSLSetPeerDomainName.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t
+ ]
+ Security.SSLSetPeerDomainName.restype = OSStatus
+
+ Security.SSLHandshake.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLHandshake.restype = OSStatus
+
+ Security.SSLRead.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLRead.restype = OSStatus
+
+ Security.SSLWrite.argtypes = [
+ SSLContextRef,
+ c_char_p,
+ c_size_t,
+ POINTER(c_size_t)
+ ]
+ Security.SSLWrite.restype = OSStatus
+
+ Security.SSLClose.argtypes = [
+ SSLContextRef
+ ]
+ Security.SSLClose.restype = OSStatus
+
+ Security.SSLGetNumberSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+ Security.SSLGetSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetSupportedCiphers.restype = OSStatus
+
+ Security.SSLSetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ c_size_t
+ ]
+ Security.SSLSetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNumberEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t)
+ ]
+ Security.SSLGetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNegotiatedCipher.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite)
+ ]
+ Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+ Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+ SSLContextRef,
+ POINTER(SSLProtocol)
+ ]
+ Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+ Security.SSLCopyPeerTrust.argtypes = [
+ SSLContextRef,
+ POINTER(SecTrustRef)
+ ]
+ Security.SSLCopyPeerTrust.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificates.argtypes = [
+ SecTrustRef,
+ CFArrayRef
+ ]
+ Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
+ SecTrustRef,
+ Boolean
+ ]
+ Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+ Security.SecTrustEvaluate.argtypes = [
+ SecTrustRef,
+ POINTER(SecTrustResultType)
+ ]
+ Security.SecTrustEvaluate.restype = OSStatus
+
+ Security.SecTrustGetCertificateCount.argtypes = [
+ SecTrustRef
+ ]
+ Security.SecTrustGetCertificateCount.restype = CFIndex
+
+ Security.SecTrustGetCertificateAtIndex.argtypes = [
+ SecTrustRef,
+ CFIndex
+ ]
+ Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+ Security.SSLCreateContext.argtypes = [
+ CFAllocatorRef,
+ SSLProtocolSide,
+ SSLConnectionType
+ ]
+ Security.SSLCreateContext.restype = SSLContextRef
+
+ Security.SSLSetSessionOption.argtypes = [
+ SSLContextRef,
+ SSLSessionOption,
+ Boolean
+ ]
+ Security.SSLSetSessionOption.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMin.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMax.argtypes = [
+ SSLContextRef,
+ SSLProtocol
+ ]
+ Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+ Security.SecCopyErrorMessageString.argtypes = [
+ OSStatus,
+ c_void_p
+ ]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SSLReadFunc = SSLReadFunc
+ Security.SSLWriteFunc = SSLWriteFunc
+ Security.SSLContextRef = SSLContextRef
+ Security.SSLProtocol = SSLProtocol
+ Security.SSLCipherSuite = SSLCipherSuite
+ Security.SecIdentityRef = SecIdentityRef
+ Security.SecKeychainRef = SecKeychainRef
+ Security.SecTrustRef = SecTrustRef
+ Security.SecTrustResultType = SecTrustResultType
+ Security.SecExternalFormat = SecExternalFormat
+ Security.OSStatus = OSStatus
+
+ Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+ Security, 'kSecImportExportPassphrase'
+ )
+ Security.kSecImportItemIdentity = CFStringRef.in_dll(
+ Security, 'kSecImportItemIdentity'
+ )
+
+ # CoreFoundation time!
+ CoreFoundation.CFRetain.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRetain.restype = CFTypeRef
+
+ CoreFoundation.CFRelease.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFRelease.restype = None
+
+ CoreFoundation.CFGetTypeID.argtypes = [
+ CFTypeRef
+ ]
+ CoreFoundation.CFGetTypeID.restype = CFTypeID
+
+ CoreFoundation.CFStringCreateWithCString.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
+
+ CoreFoundation.CFStringGetCStringPtr.argtypes = [
+ CFStringRef,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
+
+ CoreFoundation.CFStringGetCString.argtypes = [
+ CFStringRef,
+ c_char_p,
+ CFIndex,
+ CFStringEncoding
+ ]
+ CoreFoundation.CFStringGetCString.restype = c_bool
+
+ CoreFoundation.CFDataCreate.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFIndex
+ ]
+ CoreFoundation.CFDataCreate.restype = CFDataRef
+
+ CoreFoundation.CFDataGetLength.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetLength.restype = CFIndex
+
+ CoreFoundation.CFDataGetBytePtr.argtypes = [
+ CFDataRef
+ ]
+ CoreFoundation.CFDataGetBytePtr.restype = c_void_p
+
+ CoreFoundation.CFDictionaryCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFDictionaryKeyCallBacks,
+ CFDictionaryValueCallBacks
+ ]
+ CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
+
+ CoreFoundation.CFDictionaryGetValue.argtypes = [
+ CFDictionaryRef,
+ CFTypeRef
+ ]
+ CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
+
+ CoreFoundation.CFArrayCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreate.restype = CFArrayRef
+
+ CoreFoundation.CFArrayCreateMutable.argtypes = [
+ CFAllocatorRef,
+ CFIndex,
+ CFArrayCallBacks
+ ]
+ CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
+
+ CoreFoundation.CFArrayAppendValue.argtypes = [
+ CFMutableArrayRef,
+ c_void_p
+ ]
+ CoreFoundation.CFArrayAppendValue.restype = None
+
+ CoreFoundation.CFArrayGetCount.argtypes = [
+ CFArrayRef
+ ]
+ CoreFoundation.CFArrayGetCount.restype = CFIndex
+
+ CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
+ CFArrayRef,
+ CFIndex
+ ]
+ CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
+
+ CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
+ CoreFoundation, 'kCFAllocatorDefault'
+ )
+ CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
+ )
+ CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
+ CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
+ )
+
+ CoreFoundation.CFTypeRef = CFTypeRef
+ CoreFoundation.CFArrayRef = CFArrayRef
+ CoreFoundation.CFStringRef = CFStringRef
+ CoreFoundation.CFDictionaryRef = CFDictionaryRef
+
+except AttributeError:
+ raise ImportError('Error initializing ctypes')
+
+
+class CFConst(object):
+ """
+ A class object that acts as essentially a namespace for CoreFoundation
+ constants.
+ """
+ kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
+
+
+class SecurityConst(object):
+ """
+ A class object that acts as essentially a namespace for Security constants.
+ """
+ kSSLSessionOptionBreakOnServerAuth = 0
+
+ kSSLProtocol2 = 1
+ kSSLProtocol3 = 2
+ kTLSProtocol1 = 4
+ kTLSProtocol11 = 7
+ kTLSProtocol12 = 8
+
+ kSSLClientSide = 1
+ kSSLStreamType = 0
+
+ kSecFormatPEMSequence = 10
+
+ kSecTrustResultInvalid = 0
+ kSecTrustResultProceed = 1
+ # This gap is present on purpose: this was kSecTrustResultConfirm, which
+ # is deprecated.
+ kSecTrustResultDeny = 3
+ kSecTrustResultUnspecified = 4
+ kSecTrustResultRecoverableTrustFailure = 5
+ kSecTrustResultFatalTrustFailure = 6
+ kSecTrustResultOtherError = 7
+
+ errSSLProtocol = -9800
+ errSSLWouldBlock = -9803
+ errSSLClosedGraceful = -9805
+ errSSLClosedNoNotify = -9816
+ errSSLClosedAbort = -9806
+
+ errSSLXCertChainInvalid = -9807
+ errSSLCrypto = -9809
+ errSSLInternal = -9810
+ errSSLCertExpired = -9814
+ errSSLCertNotYetValid = -9815
+ errSSLUnknownRootCert = -9812
+ errSSLNoRootCert = -9813
+ errSSLHostNameMismatch = -9843
+ errSSLPeerHandshakeFail = -9824
+ errSSLPeerUserCancelled = -9839
+ errSSLWeakPeerEphemeralDHKey = -9850
+ errSSLServerAuthCompleted = -9841
+ errSSLRecordOverflow = -9847
+
+ errSecVerifyFailed = -67808
+ errSecNoTrustSettings = -25263
+ errSecItemNotFound = -25300
+ errSecInvalidTrustSettings = -25262
+
+ # Cipher suites. We only pick the ones our default cipher string allows.
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
+ TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
+ TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
+ TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
+ TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
+ TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
+ TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
+ TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
+ TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
+ TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
+ TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
+ TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+ TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
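+# A sketch of how these bindings are typically consumed: build a ctypes array
+# of cipher-suite constants and hand it to SSLSetEnabledCiphers. The ``ctx``
+# variable is assumed to be an SSLContextRef from Security.SSLCreateContext.
+#
+#     suites = [
+#         SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+#         SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+#     ]
+#     ciphers = (SSLCipherSuite * len(suites))(*suites)
+#     status = Security.SSLSetEnabledCiphers(ctx, ciphers, len(suites))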
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
new file mode 100644
index 00000000..0f79a137
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
@@ -0,0 +1,344 @@
+# SPDX-License-Identifier: MIT
+"""
+Low-level helpers for the SecureTransport bindings.
+
+These are Python functions that are not directly related to the high-level APIs
+but are necessary to get them to work. They include a whole bunch of low-level
+CoreFoundation messing about and memory management. The concerns in this module
+are almost entirely about trying to avoid memory leaks and providing
+appropriate and useful assistance to the higher-level code.
+"""
+import base64
+import ctypes
+import itertools
+import re
+import os
+import ssl
+import tempfile
+
+from .bindings import Security, CoreFoundation, CFConst
+
+
+# This regular expression is used to grab PEM data out of a PEM bundle.
+_PEM_CERTS_RE = re.compile(
+ b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
+)
+
+
+def _cf_data_from_bytes(bytestring):
+ """
+ Given a bytestring, create a CFData object from it. This CFData object must
+ be CFReleased by the caller.
+ """
+ return CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
+ )
+
+
+def _cf_dictionary_from_tuples(tuples):
+ """
+ Given a list of Python tuples, create an associated CFDictionary.
+ """
+ dictionary_size = len(tuples)
+
+ # We need to get the dictionary keys and values out in the same order.
+ keys = (t[0] for t in tuples)
+ values = (t[1] for t in tuples)
+ cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
+ cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
+
+ return CoreFoundation.CFDictionaryCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ cf_keys,
+ cf_values,
+ dictionary_size,
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks,
+ CoreFoundation.kCFTypeDictionaryValueCallBacks,
+ )
+
+
+def _cf_string_to_unicode(value):
+ """
+ Creates a Unicode string from a CFString object. Used entirely for error
+ reporting.
+
+ Yes, it annoys me quite a lot that this function is this complex.
+ """
+ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
+
+ string = CoreFoundation.CFStringGetCStringPtr(
+ value_as_void_p,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if string is None:
+ buffer = ctypes.create_string_buffer(1024)
+ result = CoreFoundation.CFStringGetCString(
+ value_as_void_p,
+ buffer,
+ 1024,
+ CFConst.kCFStringEncodingUTF8
+ )
+ if not result:
+ raise OSError('Error copying C string from CFStringRef')
+ string = buffer.value
+ if string is not None:
+ string = string.decode('utf-8')
+ return string
+
+
+def _assert_no_error(error, exception_class=None):
+ """
+ Checks the return code and throws an exception if there is an error to
+ report
+ """
+ if error == 0:
+ return
+
+ cf_error_string = Security.SecCopyErrorMessageString(error, None)
+ output = _cf_string_to_unicode(cf_error_string)
+ CoreFoundation.CFRelease(cf_error_string)
+
+ if output is None or output == u'':
+ output = u'OSStatus %s' % error
+
+ if exception_class is None:
+ exception_class = ssl.SSLError
+
+ raise exception_class(output)
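+# The check above is normally used in a call-then-check pattern; a sketch,
+# assuming ``ctx`` is an SSLContextRef obtained elsewhere:
+#
+#     trust = Security.SecTrustRef()
+#     status = Security.SSLCopyPeerTrust(ctx, ctypes.byref(trust))
+#     _assert_no_error(status)   # raises ssl.SSLError if status is non-zero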
+
+
+def _cert_array_from_pem(pem_bundle):
+ """
+ Given a bundle of certs in PEM format, turns them into a CFArray of certs
+ that can be used to validate a cert chain.
+ """
+ der_certs = [
+ base64.b64decode(match.group(1))
+ for match in _PEM_CERTS_RE.finditer(pem_bundle)
+ ]
+ if not der_certs:
+ raise ssl.SSLError("No root certificates specified")
+
+ cert_array = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
+ )
+ if not cert_array:
+ raise ssl.SSLError("Unable to allocate memory!")
+
+ try:
+ for der_bytes in der_certs:
+ certdata = _cf_data_from_bytes(der_bytes)
+ if not certdata:
+ raise ssl.SSLError("Unable to allocate memory!")
+ cert = Security.SecCertificateCreateWithData(
+ CoreFoundation.kCFAllocatorDefault, certdata
+ )
+ CoreFoundation.CFRelease(certdata)
+ if not cert:
+ raise ssl.SSLError("Unable to build cert object!")
+
+ CoreFoundation.CFArrayAppendValue(cert_array, cert)
+ CoreFoundation.CFRelease(cert)
+ except Exception:
+ # We need to free the array before the exception bubbles further.
+ # We only want to do that if an error occurs: otherwise, the caller
+ # should free.
+ CoreFoundation.CFRelease(cert_array)
+ raise
+
+ return cert_array
+
+
+def _is_cert(item):
+ """
+ Returns True if a given CFTypeRef is a certificate.
+ """
+ expected = Security.SecCertificateGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+ """
+ Returns True if a given CFTypeRef is an identity.
+ """
+ expected = Security.SecIdentityGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+ """
+ This function creates a temporary Mac keychain that we can use to work with
+ credentials. This keychain uses a one-time password and a temporary file to
+ store the data. We expect to have one keychain per socket. The returned
+ SecKeychainRef must be freed by the caller, including calling
+ SecKeychainDelete.
+
+ Returns a tuple of the SecKeychainRef and the path to the temporary
+ directory that contains it.
+ """
+ # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+ # means we cannot use mkstemp to use a generic temporary file. Instead,
+ # we're going to create a temporary directory and a filename to use there.
+ # This filename will be 8 random bytes expanded into base64. We also need
+ # some random bytes to password-protect the keychain we're creating, so we
+ # ask for 40 random bytes.
+ random_bytes = os.urandom(40)
+ filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
+ password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
+ tempdirectory = tempfile.mkdtemp()
+
+ keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
+
+ # We now want to create the keychain itself.
+ keychain = Security.SecKeychainRef()
+ status = Security.SecKeychainCreate(
+ keychain_path,
+ len(password),
+ password,
+ False,
+ None,
+ ctypes.byref(keychain)
+ )
+ _assert_no_error(status)
+
+ # Having created the keychain, we want to pass it off to the caller.
+ return keychain, tempdirectory
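+# A sketch of the create/clean-up pairing the docstring asks for; the shutil
+# import is illustrative only:
+#
+#     import shutil
+#
+#     keychain, keychain_dir = _temporary_keychain()
+#     try:
+#         pass  # import identities/certificates into the keychain here
+#     finally:
+#         Security.SecKeychainDelete(keychain)
+#         CoreFoundation.CFRelease(keychain)
+#         shutil.rmtree(keychain_dir, ignore_errors=True)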
+
+
+def _load_items_from_file(keychain, path):
+ """
+ Given a single file, loads all the trust objects from it into arrays and
+ the keychain.
+ Returns a tuple of lists: the first list is a list of identities, the
+ second a list of certs.
+ """
+ certificates = []
+ identities = []
+ result_array = None
+
+ with open(path, 'rb') as f:
+ raw_filedata = f.read()
+
+ try:
+ filedata = CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ raw_filedata,
+ len(raw_filedata)
+ )
+ result_array = CoreFoundation.CFArrayRef()
+ result = Security.SecItemImport(
+ filedata, # cert data
+ None, # Filename, leaving it out for now
+ None, # What the type of the file is, we don't care
+ None, # what's in the file, we don't care
+ 0, # import flags
+ None, # key params, can include passphrase in the future
+ keychain, # The keychain to insert into
+ ctypes.byref(result_array) # Results
+ )
+ _assert_no_error(result)
+
+ # A CFArray is not very useful to us as an intermediary
+ # representation, so we are going to extract the objects we want
+ # and then free the array. We don't need to keep hold of keys: the
+ # keychain already has them!
+ result_count = CoreFoundation.CFArrayGetCount(result_array)
+ for index in range(result_count):
+ item = CoreFoundation.CFArrayGetValueAtIndex(
+ result_array, index
+ )
+ item = ctypes.cast(item, CoreFoundation.CFTypeRef)
+
+ if _is_cert(item):
+ CoreFoundation.CFRetain(item)
+ certificates.append(item)
+ elif _is_identity(item):
+ CoreFoundation.CFRetain(item)
+ identities.append(item)
+ finally:
+ if result_array:
+ CoreFoundation.CFRelease(result_array)
+
+ CoreFoundation.CFRelease(filedata)
+
+ return (identities, certificates)
+
+
+def _load_client_cert_chain(keychain, *paths):
+ """
+ Load certificates and maybe keys from a number of files. Has the end goal
+ of returning a CFArray containing one SecIdentityRef, and then zero or more
+ SecCertificateRef objects, suitable for use as a client certificate trust
+ chain.
+ """
+ # Ok, the strategy.
+ #
+ # This relies on knowing that macOS will not give you a SecIdentityRef
+ # unless you have imported a key into a keychain. This is a somewhat
+ # artificial limitation of macOS (for example, it doesn't necessarily
+ # affect iOS), but there is nothing inside Security.framework that lets you
+ # get a SecIdentityRef without having a key in a keychain.
+ #
+ # So the policy here is we take all the files and iterate them in order.
+ # Each one will use SecItemImport to have one or more objects loaded from
+ # it. We will also point at a keychain that macOS can use to work with the
+ # private key.
+ #
+ # Once we have all the objects, we'll check what we actually have. If we
+ # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
+ # we'll take the first certificate (which we assume to be our leaf) and
+ # ask the keychain to give us a SecIdentityRef with that cert's associated
+ # key.
+ #
+ # We'll then return a CFArray containing the trust chain: one
+ # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
+ # responsibility for freeing this CFArray will be with the caller. This
+ # CFArray must remain alive for the entire connection, so in practice it
+ # will be stored with a single SSLSocket, along with the reference to the
+ # keychain.
+ certificates = []
+ identities = []
+
+ # Filter out bad paths.
+ paths = (path for path in paths if path)
+
+ try:
+ for file_path in paths:
+ new_identities, new_certs = _load_items_from_file(
+ keychain, file_path
+ )
+ identities.extend(new_identities)
+ certificates.extend(new_certs)
+
+ # Ok, we have everything. The question is: do we have an identity? If
+ # not, we want to grab one from the first cert we have.
+ if not identities:
+ new_identity = Security.SecIdentityRef()
+ status = Security.SecIdentityCreateWithCertificate(
+ keychain,
+ certificates[0],
+ ctypes.byref(new_identity)
+ )
+ _assert_no_error(status)
+ identities.append(new_identity)
+
+ # We now want to release the original certificate, as we no longer
+ # need it.
+ CoreFoundation.CFRelease(certificates.pop(0))
+
+ # We now need to build a new CFArray that holds the trust chain.
+ trust_chain = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ for item in itertools.chain(identities, certificates):
+ # ArrayAppendValue does a CFRetain on the item. That's fine,
+ # because the finally block will release our other refs to them.
+ CoreFoundation.CFArrayAppendValue(trust_chain, item)
+
+ return trust_chain
+ finally:
+ for obj in itertools.chain(identities, certificates):
+ CoreFoundation.CFRelease(obj)
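+# Putting the helpers in this module together, a caller would roughly do the
+# following (a sketch; ``ctx``, ``cert_path`` and ``key_path`` are placeholder
+# names, not part of this module):
+#
+#     keychain, keychain_dir = _temporary_keychain()
+#     trust_chain = _load_client_cert_chain(keychain, cert_path, key_path)
+#     _assert_no_error(Security.SSLSetCertificate(ctx, trust_chain))
+#     CoreFoundation.CFRelease(trust_chain)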
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
new file mode 100644
index 00000000..e74589fa
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
@@ -0,0 +1,297 @@
+# SPDX-License-Identifier: MIT
+"""
+This module provides a pool manager that uses Google App Engine's
+`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+Example usage::
+
+ from urllib3 import PoolManager
+ from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
+
+ if is_appengine_sandbox():
+ # AppEngineManager uses AppEngine's URLFetch API behind the scenes
+ http = AppEngineManager()
+ else:
+ # PoolManager uses a socket-level API behind the scenes
+ http = PoolManager()
+
+ r = http.request('GET', 'https://google.com/')
+
+There are `limitations <https://cloud.google.com/appengine/docs/python/\
+urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
+the best choice for your application. There are three options for using
+urllib3 on Google App Engine:
+
+1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
+ cost-effective in many circumstances as long as your usage is within the
+ limitations.
+2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
+ Sockets also have `limitations and restrictions
+ <https://cloud.google.com/appengine/docs/python/sockets/\
+ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
+ To use sockets, be sure to specify the following in your ``app.yaml``::
+
+ env_variables:
+ GAE_USE_SOCKETS_HTTPLIB : 'true'
+
+3. If you are using `App Engine Flexible
+<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
+:class:`PoolManager` without any configuration or special environment variables.
+"""
+
+from __future__ import absolute_import
+import logging
+import os
+import warnings
+from ..packages.six.moves.urllib.parse import urljoin
+
+from ..exceptions import (
+ HTTPError,
+ HTTPWarning,
+ MaxRetryError,
+ ProtocolError,
+ TimeoutError,
+ SSLError
+)
+
+from ..packages.six import BytesIO
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.timeout import Timeout
+from ..util.retry import Retry
+
+try:
+ from google.appengine.api import urlfetch
+except ImportError:
+ urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+ pass
+
+
+class AppEnginePlatformError(HTTPError):
+ pass
+
+
+class AppEngineManager(RequestMethods):
+ """
+ Connection manager for Google App Engine sandbox applications.
+
+ This manager uses the URLFetch service directly instead of using the
+ emulated httplib, and is subject to URLFetch limitations as described in
+ the App Engine documentation `here
+ <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+ Notably it will raise an :class:`AppEnginePlatformError` if:
+ * URLFetch is not available.
+ * You attempt to use this on App Engine Flexible, as full socket
+ support is available there.
+ * A request size is more than 10 megabytes.
+ * A response size is more than 32 megabytes.
+ * You use an unsupported request method such as OPTIONS.
+
+ Beyond those cases, it will raise normal urllib3 errors.
+ """
+
+ def __init__(self, headers=None, retries=None, validate_certificate=True,
+ urlfetch_retries=True):
+ if not urlfetch:
+ raise AppEnginePlatformError(
+ "URLFetch is not available in this environment.")
+
+ if is_prod_appengine_mvms():
+ raise AppEnginePlatformError(
+ "Use normal urllib3.PoolManager instead of AppEngineManager"
+ "on Managed VMs, as using URLFetch is not necessary in "
+ "this environment.")
+
+ warnings.warn(
+ "urllib3 is using URLFetch on Google App Engine sandbox instead "
+ "of sockets. To use sockets directly instead of URLFetch see "
+ "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
+ AppEnginePlatformWarning)
+
+ RequestMethods.__init__(self, headers)
+ self.validate_certificate = validate_certificate
+ self.urlfetch_retries = urlfetch_retries
+
+ self.retries = retries or Retry.DEFAULT
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def urlopen(self, method, url, body=None, headers=None,
+ retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
+ **response_kw):
+
+ retries = self._get_retries(retries, redirect)
+
+ try:
+ follow_redirects = (
+ redirect and
+ retries.redirect != 0 and
+ retries.total)
+ response = urlfetch.fetch(
+ url,
+ payload=body,
+ method=method,
+ headers=headers or {},
+ allow_truncated=False,
+ follow_redirects=self.urlfetch_retries and follow_redirects,
+ deadline=self._get_absolute_timeout(timeout),
+ validate_certificate=self.validate_certificate,
+ )
+ except urlfetch.DeadlineExceededError as e:
+ raise TimeoutError(self, e)
+
+ except urlfetch.InvalidURLError as e:
+ if 'too large' in str(e):
+ raise AppEnginePlatformError(
+ "URLFetch request too large, URLFetch only "
+ "supports requests up to 10mb in size.", e)
+ raise ProtocolError(e)
+
+ except urlfetch.DownloadError as e:
+ if 'Too many redirects' in str(e):
+ raise MaxRetryError(self, url, reason=e)
+ raise ProtocolError(e)
+
+ except urlfetch.ResponseTooLargeError as e:
+ raise AppEnginePlatformError(
+ "URLFetch response too large, URLFetch only supports"
+ "responses up to 32mb in size.", e)
+
+ except urlfetch.SSLCertificateError as e:
+ raise SSLError(e)
+
+ except urlfetch.InvalidMethodError as e:
+ raise AppEnginePlatformError(
+ "URLFetch does not support method: %s" % method, e)
+
+ http_response = self._urlfetch_response_to_http_response(
+ response, retries=retries, **response_kw)
+
+ # Handle redirect?
+ redirect_location = redirect and http_response.get_redirect_location()
+ if redirect_location:
+ # Check for redirect response
+ if (self.urlfetch_retries and retries.raise_on_redirect):
+ raise MaxRetryError(self, url, "too many redirects")
+ else:
+ if http_response.status == 303:
+ method = 'GET'
+
+ try:
+ retries = retries.increment(method, url, response=http_response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ return http_response
+
+ retries.sleep_for_retry(http_response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ redirect_url = urljoin(url, redirect_location)
+ return self.urlopen(
+ method, redirect_url, body, headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(http_response.getheader('Retry-After'))
+ if retries.is_retry(method, http_response.status, has_retry_after):
+ retries = retries.increment(
+ method, url, response=http_response, _pool=self)
+ log.debug("Retry: %s", url)
+ retries.sleep(http_response)
+ return self.urlopen(
+ method, url,
+ body=body, headers=headers,
+ retries=retries, redirect=redirect,
+ timeout=timeout, **response_kw)
+
+ return http_response
+
+ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+ if is_prod_appengine():
+ # Production GAE handles deflate encoding automatically, but does
+ # not remove the encoding header.
+ content_encoding = urlfetch_resp.headers.get('content-encoding')
+
+ if content_encoding == 'deflate':
+ del urlfetch_resp.headers['content-encoding']
+
+ transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
+ # We have a full response's content,
+ # so let's make sure we don't report ourselves as chunked data.
+ if transfer_encoding == 'chunked':
+ encodings = transfer_encoding.split(",")
+ encodings.remove('chunked')
+ urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
+
+ return HTTPResponse(
+ # In order for decoding to work, we must present the content as
+ # a file-like object.
+ body=BytesIO(urlfetch_resp.content),
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ **response_kw
+ )
+
+ def _get_absolute_timeout(self, timeout):
+ if timeout is Timeout.DEFAULT_TIMEOUT:
+ return None # Defer to URLFetch's default.
+ if isinstance(timeout, Timeout):
+ if timeout._read is not None or timeout._connect is not None:
+ warnings.warn(
+ "URLFetch does not support granular timeout settings, "
+ "reverting to total or default URLFetch timeout.",
+ AppEnginePlatformWarning)
+ return timeout.total
+ return timeout
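+# A sketch of the mapping above (values are illustrative): a plain total
+# timeout becomes the URLFetch deadline, while the library default defers to
+# URLFetch's own default.
+#
+#     self._get_absolute_timeout(Timeout(total=10))        # -> 10
+#     self._get_absolute_timeout(Timeout.DEFAULT_TIMEOUT)  # -> None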
+
+ def _get_retries(self, retries, redirect):
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(
+ retries, redirect=redirect, default=self.retries)
+
+ if retries.connect or retries.read or retries.redirect:
+ warnings.warn(
+ "URLFetch only supports total retries and does not "
+ "recognize connect, read, or redirect retry parameters.",
+ AppEnginePlatformWarning)
+
+ return retries
+
+
+def is_appengine():
+ return (is_local_appengine() or
+ is_prod_appengine() or
+ is_prod_appengine_mvms())
+
+
+def is_appengine_sandbox():
+ return is_appengine() and not is_prod_appengine_mvms()
+
+
+def is_local_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Development/' in os.environ['SERVER_SOFTWARE'])
+
+
+def is_prod_appengine():
+ return ('APPENGINE_RUNTIME' in os.environ and
+ 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
+ not is_prod_appengine_mvms())
+
+
+def is_prod_appengine_mvms():
+ return os.environ.get('GAE_VM', False) == 'true'
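+# A minimal usage sketch, assuming the code runs inside a URLFetch-capable
+# App Engine sandbox; the URL and retry/timeout values are illustrative:
+#
+#     from urllib3 import Retry, Timeout
+#     from urllib3.contrib.appengine import AppEngineManager
+#
+#     http = AppEngineManager(retries=Retry(total=3), validate_certificate=True)
+#     r = http.request('GET', 'https://example.com/', timeout=Timeout(total=10))
+#     print(r.status, len(r.data))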
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
new file mode 100644
index 00000000..3f8c9ebf
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: MIT
+"""
+NTLM authenticating pool, contributed by erikcederstran
+
+Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
+"""
+from __future__ import absolute_import
+
+from logging import getLogger
+from ntlm import ntlm
+
+from .. import HTTPSConnectionPool
+from ..packages.six.moves.http_client import HTTPSConnection
+
+
+log = getLogger(__name__)
+
+
+class NTLMConnectionPool(HTTPSConnectionPool):
+ """
+ Implements an NTLM authentication version of an urllib3 connection pool
+ """
+
+ scheme = 'https'
+
+ def __init__(self, user, pw, authurl, *args, **kwargs):
+ """
+ authurl is a random URL on the server that is protected by NTLM.
+ user is the Windows user, probably in the DOMAIN\\username format.
+ pw is the password for the user.
+ """
+ super(NTLMConnectionPool, self).__init__(*args, **kwargs)
+ self.authurl = authurl
+ self.rawuser = user
+ user_parts = user.split('\\', 1)
+ self.domain = user_parts[0].upper()
+ self.user = user_parts[1]
+ self.pw = pw
+
+ def _new_conn(self):
+ # Performs the NTLM handshake that secures the connection. The socket
+ # must be kept open while requests are performed.
+ self.num_connections += 1
+ log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
+ self.num_connections, self.host, self.authurl)
+
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ req_header = 'Authorization'
+ resp_header = 'www-authenticate'
+
+ conn = HTTPSConnection(host=self.host, port=self.port)
+
+ # Send negotiation message
+ headers[req_header] = (
+ 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ reshdr = dict(res.getheaders())
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', reshdr)
+ log.debug('Response data: %s [...]', res.read(100))
+
+ # Remove the reference to the socket, so that it can not be closed by
+ # the response object (we want to keep the socket open)
+ res.fp = None
+
+ # Server should respond with a challenge message
+ auth_header_values = reshdr[resp_header].split(', ')
+ auth_header_value = None
+ for s in auth_header_values:
+ if s[:5] == 'NTLM ':
+ auth_header_value = s[5:]
+ if auth_header_value is None:
+ raise Exception('Unexpected %s response header: %s' %
+ (resp_header, reshdr[resp_header]))
+
+ # Send authentication message
+ ServerChallenge, NegotiateFlags = \
+ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
+ auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
+ self.user,
+ self.domain,
+ self.pw,
+ NegotiateFlags)
+ headers[req_header] = 'NTLM %s' % auth_msg
+ log.debug('Request headers: %s', headers)
+ conn.request('GET', self.authurl, None, headers)
+ res = conn.getresponse()
+ log.debug('Response status: %s %s', res.status, res.reason)
+ log.debug('Response headers: %s', dict(res.getheaders()))
+ log.debug('Response data: %s [...]', res.read()[:100])
+ if res.status != 200:
+ if res.status == 401:
+ raise Exception('Server rejected request: wrong '
+ 'username or password')
+ raise Exception('Wrong server response: %s %s' %
+ (res.status, res.reason))
+
+ res.fp = None
+ log.debug('Connection established')
+ return conn
+
+ def urlopen(self, method, url, body=None, headers=None, retries=3,
+ redirect=True, assert_same_host=True):
+ if headers is None:
+ headers = {}
+ headers['Connection'] = 'Keep-Alive'
+ return super(NTLMConnectionPool, self).urlopen(method, url, body,
+ headers, retries,
+ redirect,
+ assert_same_host)
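+# A sketch of constructing this pool; the domain, credentials, and host below
+# are placeholders:
+#
+#     pool = NTLMConnectionPool('EXAMPLE\\jdoe', 'secret', authurl='/protected',
+#                               host='intranet.example.com', port=443)
+#     resp = pool.urlopen('GET', '/protected')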
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
new file mode 100644
index 00000000..8d373507
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
@@ -0,0 +1,458 @@
+# SPDX-License-Identifier: MIT
+"""
+SSL with SNI_-support for Python 2. Follow these instructions if you would
+like to verify SSL certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
+
+This needs the following packages installed:
+
+* pyOpenSSL (tested with 16.0.0)
+* cryptography (minimum 1.3.4, from pyopenssl)
+* idna (minimum 2.0, from cryptography)
+
+However, pyopenssl depends on cryptography, which depends on idna, so while we
+use all three directly here we end up having relatively few packages required.
+
+You can install them with the following command:
+
+ pip install pyopenssl cryptography idna
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this::
+
+ try:
+ import urllib3.contrib.pyopenssl
+ urllib3.contrib.pyopenssl.inject_into_urllib3()
+ except ImportError:
+ pass
+
+Now you can use :mod:`urllib3` as you normally would, and it will support SNI
+when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+If you want to configure the default list of supported cipher suites, you can
+set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+"""
+from __future__ import absolute_import
+
+import OpenSSL.SSL
+from cryptography import x509
+from cryptography.hazmat.backends.openssl import backend as openssl_backend
+from cryptography.hazmat.backends.openssl.x509 import _Certificate
+
+from socket import timeout, error as SocketError
+from io import BytesIO
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+import logging
+import ssl
+
+try:
+ import six
+except ImportError:
+ from ..packages import six
+
+import sys
+
+from .. import util
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works.
+HAS_SNI = True
+
+# Map from urllib3 to PyOpenSSL compatible parameter-values.
+_openssl_versions = {
+ ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
+ ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+}
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
+try:
+ _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
+except AttributeError:
+ pass
+
+_stdlib_to_openssl_verify = {
+ ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
+ ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
+ ssl.CERT_REQUIRED:
+ OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+}
+_openssl_to_stdlib_verify = dict(
+ (v, k) for k, v in _stdlib_to_openssl_verify.items()
+)
+
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+
+log = logging.getLogger(__name__)
+
+
+def inject_into_urllib3():
+ 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
+
+ _validate_dependencies_met()
+
+ util.ssl_.SSLContext = PyOpenSSLContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_PYOPENSSL = True
+ util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+ 'Undo monkey-patching by :func:`inject_into_urllib3`.'
+
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_PYOPENSSL = False
+ util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+ """
+ Verifies that PyOpenSSL's package-level dependencies have been met.
+ Throws `ImportError` if they are not met.
+ """
+ # Method added in `cryptography==1.1`; not available in older versions
+ from cryptography.x509.extensions import Extensions
+ if getattr(Extensions, "get_extension_for_class", None) is None:
+ raise ImportError("'cryptography' module missing required functionality. "
+ "Try upgrading to v1.3.4 or newer.")
+
+ # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+ # attribute is only present on those versions.
+ from OpenSSL.crypto import X509
+ x509 = X509()
+ if getattr(x509, "_x509", None) is None:
+ raise ImportError("'pyOpenSSL' module missing required functionality. "
+ "Try upgrading to v0.14 or newer.")
+
+
+def _dnsname_to_stdlib(name):
+ """
+ Converts a dNSName SubjectAlternativeName field to the form used by the
+ standard library on the given Python version.
+
+ Cryptography produces a dNSName as a unicode string that was idna-decoded
+ from ASCII bytes. We need to idna-encode that string to get it back, and
+ then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+ uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
+ """
+ def idna_encode(name):
+ """
+ Borrowed wholesale from the Python Cryptography Project. It turns out
+ that we can't just safely call `idna.encode`: it can explode for
+ wildcard names. This avoids that problem.
+ """
+ import idna
+
+ for prefix in [u'*.', u'.']:
+ if name.startswith(prefix):
+ name = name[len(prefix):]
+ return prefix.encode('ascii') + idna.encode(name)
+ return idna.encode(name)
+
+ name = idna_encode(name)
+ if sys.version_info >= (3, 0):
+ name = name.decode('utf-8')
+ return name
+
+
+def get_subj_alt_name(peer_cert):
+ """
+    Given a PyOpenSSL certificate, provides all the subject alternative names.
+ """
+ # Pass the cert to cryptography, which has much better APIs for this.
+ # This is technically using private APIs, but should work across all
+ # relevant versions until PyOpenSSL gets something proper for this.
+ cert = _Certificate(openssl_backend, peer_cert._x509)
+
+ # We want to find the SAN extension. Ask Cryptography to locate it (it's
+ # faster than looping in Python)
+ try:
+ ext = cert.extensions.get_extension_for_class(
+ x509.SubjectAlternativeName
+ ).value
+ except x509.ExtensionNotFound:
+ # No such extension, return the empty list.
+ return []
+ except (x509.DuplicateExtension, x509.UnsupportedExtension,
+ x509.UnsupportedGeneralNameType, UnicodeError) as e:
+ # A problem has been found with the quality of the certificate. Assume
+ # no SAN field is present.
+ log.warning(
+ "A problem was encountered with the certificate that prevented "
+ "urllib3 from finding the SubjectAlternativeName field. This can "
+ "affect certificate validation. The error was %s",
+ e,
+ )
+ return []
+
+ # We want to return dNSName and iPAddress fields. We need to cast the IPs
+ # back to strings because the match_hostname function wants them as
+ # strings.
+ # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
+ # decoded. This is pretty frustrating, but that's what the standard library
+ # does with certificates, and so we need to attempt to do the same.
+ names = [
+ ('DNS', _dnsname_to_stdlib(name))
+ for name in ext.get_values_for_type(x509.DNSName)
+ ]
+ names.extend(
+ ('IP Address', str(name))
+ for name in ext.get_values_for_type(x509.IPAddress)
+ )
+
+ return names
+
+
+class WrappedSocket(object):
+ '''API-compatibility wrapper for Python OpenSSL's Connection-class.
+
+ Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+ collector of pypy.
+ '''
+
+ def __init__(self, connection, socket, suppress_ragged_eofs=True):
+ self.connection = connection
+ self.socket = socket
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+ self._closed = False
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, *args, **kwargs):
+ try:
+ data = self.connection.recv(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return b''
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return b''
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv(*args, **kwargs)
+ else:
+ return data
+
+ def recv_into(self, *args, **kwargs):
+ try:
+ return self.connection.recv_into(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+ return 0
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError as e:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return 0
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(self.socket, self.socket.gettimeout())
+ if not rd:
+ raise timeout('The read operation timed out')
+ else:
+ return self.recv_into(*args, **kwargs)
+
+ def settimeout(self, timeout):
+ return self.socket.settimeout(timeout)
+
+ def _send_until_done(self, data):
+ while True:
+ try:
+ return self.connection.send(data)
+ except OpenSSL.SSL.WantWriteError:
+ wr = util.wait_for_write(self.socket, self.socket.gettimeout())
+ if not wr:
+ raise timeout()
+ continue
+ except OpenSSL.SSL.SysCallError as e:
+ raise SocketError(str(e))
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ # FIXME rethrow compatible exceptions should we ever use this
+ self.connection.shutdown()
+
+ def close(self):
+ if self._makefile_refs < 1:
+ try:
+ self._closed = True
+ return self.connection.close()
+ except OpenSSL.SSL.Error:
+ return
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ x509 = self.connection.get_peer_certificate()
+
+ if not x509:
+ return x509
+
+ if binary_form:
+ return OpenSSL.crypto.dump_certificate(
+ OpenSSL.crypto.FILETYPE_ASN1,
+ x509)
+
+ return {
+ 'subject': (
+ (('commonName', x509.get_subject().CN),),
+ ),
+ 'subjectAltName': get_subj_alt_name(x509)
+ }
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+ """
+ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+ for translating the interface of the standard library ``SSLContext`` object
+ to calls into PyOpenSSL.
+ """
+ def __init__(self, protocol):
+ self.protocol = _openssl_versions[protocol]
+ self._ctx = OpenSSL.SSL.Context(self.protocol)
+ self._options = 0
+ self.check_hostname = False
+
+ @property
+ def options(self):
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ self._options = value
+ self._ctx.set_options(value)
+
+ @property
+ def verify_mode(self):
+ return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._ctx.set_verify(
+ _stdlib_to_openssl_verify[value],
+ _verify_callback
+ )
+
+ def set_default_verify_paths(self):
+ self._ctx.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ if isinstance(ciphers, six.text_type):
+ ciphers = ciphers.encode('utf-8')
+ self._ctx.set_cipher_list(ciphers)
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ if cafile is not None:
+ cafile = cafile.encode('utf-8')
+ if capath is not None:
+ capath = capath.encode('utf-8')
+ self._ctx.load_verify_locations(cafile, capath)
+ if cadata is not None:
+ self._ctx.load_verify_locations(BytesIO(cadata))
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._ctx.use_certificate_file(certfile)
+ if password is not None:
+ self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
+ self._ctx.use_privatekey_file(keyfile or certfile)
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+ if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
+ server_hostname = server_hostname.encode('utf-8')
+
+ if server_hostname is not None:
+ cnx.set_tlsext_host_name(server_hostname)
+
+ cnx.set_connect_state()
+
+ while True:
+ try:
+ cnx.do_handshake()
+ except OpenSSL.SSL.WantReadError:
+ rd = util.wait_for_read(sock, sock.gettimeout())
+ if not rd:
+ raise timeout('select timed out')
+ continue
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError('bad handshake: %r' % e)
+ break
+
+ return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+ return err_no == 0
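+
+
+# Illustrative, main-guarded sketch (not part of upstream urllib3): using
+# PyOpenSSLContext directly to wrap a plain socket. The host name is a
+# placeholder, certificate verification is left at its permissive default to
+# keep the sketch minimal, and it assumes pyOpenSSL, cryptography and idna are
+# installed in versions compatible with this module.
+if __name__ == '__main__':
+    import socket
+    ctx = PyOpenSSLContext(ssl.PROTOCOL_SSLv23)
+    raw = socket.create_connection(('example.com', 443))
+    tls = ctx.wrap_socket(raw, server_hostname='example.com')
+    # get_subj_alt_name() extracts the SAN entries via cryptography.
+    print(get_subj_alt_name(tls.connection.get_peer_certificate()))
+    tls.close()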
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
new file mode 100644
index 00000000..fcc30118
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
@@ -0,0 +1,808 @@
+# SPDX-License-Identifier: MIT
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+ import urllib3.contrib.securetransport
+ urllib3.contrib.securetransport.inject_into_urllib3()
+
+Happy TLSing!
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import threading
+import weakref
+
+from .. import util
+from ._securetransport.bindings import (
+ Security, SecurityConst, CoreFoundation
+)
+from ._securetransport.low_level import (
+ _assert_no_error, _cert_array_from_pem, _temporary_keychain,
+ _load_client_cert_chain
+)
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+try:
+ memoryview(b'')
+except NameError:
+ raise ImportError("SecureTransport only works on Pythons with memoryview")
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+# 1. It is not possible to call into the callbacks before the dictionary is
+# populated, so once in the callback the id must be in the dictionary.
+# 2. The callbacks don't mutate the dictionary, they only read from it, and
+# so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
+]
+
+# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
+# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
+_protocol_to_min_max = {
+ ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv2"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
+ SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
+ )
+if hasattr(ssl, "PROTOCOL_SSLv3"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
+ SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
+ SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
+ SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_2"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
+ SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
+ )
+if hasattr(ssl, "PROTOCOL_TLS"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
+
+
+def inject_into_urllib3():
+ """
+ Monkey-patch urllib3 with SecureTransport-backed SSL-support.
+ """
+ util.ssl_.SSLContext = SecureTransportContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_SECURETRANSPORT = True
+ util.ssl_.IS_SECURETRANSPORT = True
+
+
+def extract_from_urllib3():
+ """
+ Undo monkey-patching by :func:`inject_into_urllib3`.
+ """
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_SECURETRANSPORT = False
+ util.ssl_.IS_SECURETRANSPORT = False
+
+
+def _read_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport read callback. This is called by ST to request that data
+ be returned from the socket.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ requested_length = data_length_pointer[0]
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ read_count = 0
+ buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
+ buffer_view = memoryview(buffer)
+
+ try:
+ while read_count < requested_length:
+ if timeout is None or timeout >= 0:
+ readables = util.wait_for_read([base_socket], timeout)
+ if not readables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+
+ # We need to tell ctypes that we have a buffer that can be
+ # written to. Upsettingly, we do that like this:
+ chunk_size = base_socket.recv_into(
+ buffer_view[read_count:requested_length]
+ )
+ read_count += chunk_size
+ if not chunk_size:
+ if not read_count:
+ return SecurityConst.errSSLClosedGraceful
+ break
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = read_count
+
+ if read_count != requested_length:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport write callback. This is called by ST to request that data
+ actually be sent on the network.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ bytes_to_write = data_length_pointer[0]
+ data = ctypes.string_at(data_buffer, bytes_to_write)
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ sent = 0
+
+ try:
+ while sent < bytes_to_write:
+ if timeout is None or timeout >= 0:
+ writables = util.wait_for_write([base_socket], timeout)
+ if not writables:
+ raise socket.error(errno.EAGAIN, 'timed out')
+ chunk_sent = base_socket.send(data)
+ sent += chunk_sent
+
+ # This has some needless copying here, but I'm not sure there's
+ # much value in optimising this data path.
+ data = data[chunk_sent:]
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ if error == errno.ECONNRESET:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = sent
+ if sent != bytes_to_write:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+# We need to keep references to these two objects alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+ """
+ API-compatibility wrapper for Python's OpenSSL wrapped socket object.
+
+ Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+ collector of PyPy.
+ """
+ def __init__(self, socket):
+ self.socket = socket
+ self.context = None
+ self._makefile_refs = 0
+ self._closed = False
+ self._exception = None
+ self._keychain = None
+ self._keychain_dir = None
+ self._client_cert_chain = None
+
+ # We save off the previously-configured timeout and then set it to
+ # zero. This is done because we use select and friends to handle the
+ # timeouts, but if we leave the timeout set on the lower socket then
+ # Python will "kindly" call select on that socket again for us. Avoid
+ # that by forcing the timeout to zero.
+ self._timeout = self.socket.gettimeout()
+ self.socket.settimeout(0)
+
+ @contextlib.contextmanager
+ def _raise_on_error(self):
+ """
+ A context manager that can be used to wrap calls that do I/O from
+ SecureTransport. If any of the I/O callbacks hit an exception, this
+ context manager will correctly propagate the exception after the fact.
+ This avoids silently swallowing those exceptions.
+
+ It also correctly forces the socket closed.
+ """
+ self._exception = None
+
+ # We explicitly don't catch around this yield because in the unlikely
+ # event that an exception was hit in the block we don't want to swallow
+ # it.
+ yield
+ if self._exception is not None:
+ exception, self._exception = self._exception, None
+ self.close()
+ raise exception
+
+ def _set_ciphers(self):
+ """
+ Sets up the allowed ciphers. By default this matches the set in
+        util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. The list is
+        hard-coded and cannot be changed at this time, mostly because parsing
+        OpenSSL cipher strings is going to be a freaking nightmare.
+ """
+ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
+ result = Security.SSLSetEnabledCiphers(
+ self.context, ciphers, len(CIPHER_SUITES)
+ )
+ _assert_no_error(result)
+
+ def _custom_validate(self, verify, trust_bundle):
+ """
+ Called when we have set custom validation. We do this in two cases:
+ first, when cert validation is entirely disabled; and second, when
+ using a custom trust DB.
+ """
+ # If we disabled cert validation, just say: cool.
+ if not verify:
+ return
+
+ # We want data in memory, so load it up.
+ if os.path.isfile(trust_bundle):
+ with open(trust_bundle, 'rb') as f:
+ trust_bundle = f.read()
+
+ cert_array = None
+ trust = Security.SecTrustRef()
+
+ try:
+ # Get a CFArray that contains the certs we want.
+ cert_array = _cert_array_from_pem(trust_bundle)
+
+ # Ok, now the hard part. We want to get the SecTrustRef that ST has
+ # created for this connection, shove our CAs into it, tell ST to
+ # ignore everything else it knows, and then ask if it can build a
+ # chain. This is a buuuunch of code.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ raise ssl.SSLError("Failed to copy trust reference")
+
+ result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
+ _assert_no_error(result)
+
+ result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
+ _assert_no_error(result)
+
+ trust_result = Security.SecTrustResultType()
+ result = Security.SecTrustEvaluate(
+ trust, ctypes.byref(trust_result)
+ )
+ _assert_no_error(result)
+ finally:
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+            if cert_array is not None:
+ CoreFoundation.CFRelease(cert_array)
+
+ # Ok, now we can look at what the result was.
+ successes = (
+ SecurityConst.kSecTrustResultUnspecified,
+ SecurityConst.kSecTrustResultProceed
+ )
+ if trust_result.value not in successes:
+ raise ssl.SSLError(
+ "certificate verify failed, error code: %d" %
+ trust_result.value
+ )
+
+ def handshake(self,
+ server_hostname,
+ verify,
+ trust_bundle,
+ min_version,
+ max_version,
+ client_cert,
+ client_key,
+ client_key_passphrase):
+ """
+ Actually performs the TLS handshake. This is run automatically by
+ wrapped socket, and shouldn't be needed in user code.
+ """
+ # First, we do the initial bits of connection setup. We need to create
+ # a context, set its I/O funcs, and set the connection reference.
+ self.context = Security.SSLCreateContext(
+ None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+ )
+ result = Security.SSLSetIOFuncs(
+ self.context, _read_callback_pointer, _write_callback_pointer
+ )
+ _assert_no_error(result)
+
+ # Here we need to compute the handle to use. We do this by taking the
+ # id of self modulo 2**31 - 1. If this is already in the dictionary, we
+ # just keep incrementing by one until we find a free space.
+ with _connection_ref_lock:
+ handle = id(self) % 2147483647
+ while handle in _connection_refs:
+ handle = (handle + 1) % 2147483647
+ _connection_refs[handle] = self
+
+ result = Security.SSLSetConnection(self.context, handle)
+ _assert_no_error(result)
+
+ # If we have a server hostname, we should set that too.
+ if server_hostname:
+ if not isinstance(server_hostname, bytes):
+ server_hostname = server_hostname.encode('utf-8')
+
+ result = Security.SSLSetPeerDomainName(
+ self.context, server_hostname, len(server_hostname)
+ )
+ _assert_no_error(result)
+
+ # Setup the ciphers.
+ self._set_ciphers()
+
+ # Set the minimum and maximum TLS versions.
+ result = Security.SSLSetProtocolVersionMin(self.context, min_version)
+ _assert_no_error(result)
+ result = Security.SSLSetProtocolVersionMax(self.context, max_version)
+ _assert_no_error(result)
+
+ # If there's a trust DB, we need to use it. We do that by telling
+ # SecureTransport to break on server auth. We also do that if we don't
+ # want to validate the certs at all: we just won't actually do any
+ # authing in that case.
+ if not verify or trust_bundle is not None:
+ result = Security.SSLSetSessionOption(
+ self.context,
+ SecurityConst.kSSLSessionOptionBreakOnServerAuth,
+ True
+ )
+ _assert_no_error(result)
+
+ # If there's a client cert, we need to use it.
+ if client_cert:
+ self._keychain, self._keychain_dir = _temporary_keychain()
+ self._client_cert_chain = _load_client_cert_chain(
+ self._keychain, client_cert, client_key
+ )
+ result = Security.SSLSetCertificate(
+ self.context, self._client_cert_chain
+ )
+ _assert_no_error(result)
+
+ while True:
+ with self._raise_on_error():
+ result = Security.SSLHandshake(self.context)
+
+ if result == SecurityConst.errSSLWouldBlock:
+ raise socket.timeout("handshake timed out")
+ elif result == SecurityConst.errSSLServerAuthCompleted:
+ self._custom_validate(verify, trust_bundle)
+ continue
+ else:
+ _assert_no_error(result)
+ break
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, bufsiz):
+ buffer = ctypes.create_string_buffer(bufsiz)
+ bytes_read = self.recv_into(buffer, bufsiz)
+ data = buffer[:bytes_read]
+ return data
+
+ def recv_into(self, buffer, nbytes=None):
+ # Read short on EOF.
+ if self._closed:
+ return 0
+
+ if nbytes is None:
+ nbytes = len(buffer)
+
+ buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLRead(
+ self.context, buffer, nbytes, ctypes.byref(processed_bytes)
+ )
+
+ # There are some result codes that we want to treat as "not always
+ # errors". Specifically, those are errSSLWouldBlock,
+ # errSSLClosedGraceful, and errSSLClosedNoNotify.
+ if (result == SecurityConst.errSSLWouldBlock):
+ # If we didn't process any bytes, then this was just a time out.
+ # However, we can get errSSLWouldBlock in situations when we *did*
+ # read some data, and in those cases we should just read "short"
+ # and return.
+ if processed_bytes.value == 0:
+ # Timed out, no data read.
+ raise socket.timeout("recv timed out")
+ elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
+ # The remote peer has closed this connection. We should do so as
+ # well. Note that we don't actually return here because in
+ # principle this could actually be fired along with return data.
+ # It's unlikely though.
+ self.close()
+ else:
+ _assert_no_error(result)
+
+ # Ok, we read and probably succeeded. We should return whatever data
+ # was actually read.
+ return processed_bytes.value
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def send(self, data):
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLWrite(
+ self.context, data, len(data), ctypes.byref(processed_bytes)
+ )
+
+ if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
+ # Timed out
+ raise socket.timeout("send timed out")
+ else:
+ _assert_no_error(result)
+
+ # We sent, and probably succeeded. Tell them how much we sent.
+ return processed_bytes.value
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ with self._raise_on_error():
+ Security.SSLClose(self.context)
+
+ def close(self):
+ # TODO: should I do clean shutdown here? Do I have to?
+ if self._makefile_refs < 1:
+ self._closed = True
+ if self.context:
+ CoreFoundation.CFRelease(self.context)
+ self.context = None
+ if self._client_cert_chain:
+ CoreFoundation.CFRelease(self._client_cert_chain)
+ self._client_cert_chain = None
+ if self._keychain:
+ Security.SecKeychainDelete(self._keychain)
+ CoreFoundation.CFRelease(self._keychain)
+ shutil.rmtree(self._keychain_dir)
+ self._keychain = self._keychain_dir = None
+ return self.socket.close()
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ # Urgh, annoying.
+ #
+ # Here's how we do this:
+ #
+ # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
+ # connection.
+ # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
+ # 3. To get the CN, call SecCertificateCopyCommonName and process that
+ # string so that it's of the appropriate type.
+ # 4. To get the SAN, we need to do something a bit more complex:
+ # a. Call SecCertificateCopyValues to get the data, requesting
+ # kSecOIDSubjectAltName.
+ # b. Mess about with this dictionary to try to get the SANs out.
+ #
+ # This is gross. Really gross. It's going to be a few hundred LoC extra
+ # just to repeat something that SecureTransport can *already do*. So my
+ # operating assumption at this time is that what we want to do is
+ # instead to just flag to urllib3 that it shouldn't do its own hostname
+ # validation when using SecureTransport.
+ if not binary_form:
+ raise ValueError(
+ "SecureTransport only supports dumping binary certs"
+ )
+ trust = Security.SecTrustRef()
+ certdata = None
+ der_bytes = None
+
+ try:
+ # Grab the trust store.
+ result = Security.SSLCopyPeerTrust(
+ self.context, ctypes.byref(trust)
+ )
+ _assert_no_error(result)
+ if not trust:
+ # Probably we haven't done the handshake yet. No biggie.
+ return None
+
+ cert_count = Security.SecTrustGetCertificateCount(trust)
+ if not cert_count:
+ # Also a case that might happen if we haven't handshaked.
+ # Handshook? Handshaken?
+ return None
+
+ leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
+ assert leaf
+
+ # Ok, now we want the DER bytes.
+ certdata = Security.SecCertificateCopyData(leaf)
+ assert certdata
+
+ data_length = CoreFoundation.CFDataGetLength(certdata)
+ data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
+ der_bytes = ctypes.string_at(data_buffer, data_length)
+ finally:
+ if certdata:
+ CoreFoundation.CFRelease(certdata)
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ return der_bytes
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+else: # Platform-specific: Python 3
+ def makefile(self, mode="r", buffering=None, *args, **kwargs):
+ # We disable buffering with SecureTransport because it conflicts with
+ # the buffering that ST does internally (see issue #1153 for more).
+ buffering = 0
+ return backport_makefile(self, mode, buffering, *args, **kwargs)
+
+WrappedSocket.makefile = makefile
+
+
+class SecureTransportContext(object):
+ """
+ I am a wrapper class for the SecureTransport library, to translate the
+ interface of the standard library ``SSLContext`` object to calls into
+ SecureTransport.
+ """
+ def __init__(self, protocol):
+ self._min_version, self._max_version = _protocol_to_min_max[protocol]
+ self._options = 0
+ self._verify = False
+ self._trust_bundle = None
+ self._client_cert = None
+ self._client_key = None
+ self._client_key_passphrase = None
+
+ @property
+ def check_hostname(self):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ return True
+
+ @check_hostname.setter
+ def check_hostname(self, value):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ pass
+
+ @property
+ def options(self):
+ # TODO: Well, crap.
+ #
+ # So this is the bit of the code that is the most likely to cause us
+ # trouble. Essentially we need to enumerate all of the SSL options that
+ # users might want to use and try to see if we can sensibly translate
+ # them, or whether we should just ignore them.
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ # TODO: Update in line with above.
+ self._options = value
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._verify = True if value == ssl.CERT_REQUIRED else False
+
+ def set_default_verify_paths(self):
+ # So, this has to do something a bit weird. Specifically, what it does
+ # is nothing.
+ #
+ # This means that, if we had previously had load_verify_locations
+ # called, this does not undo that. We need to do that because it turns
+ # out that the rest of the urllib3 code will attempt to load the
+ # default verify paths if it hasn't been told about any paths, even if
+        # the context itself was configured sometime earlier. We resolve that by just
+ # ignoring it.
+ pass
+
+ def load_default_certs(self):
+ return self.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ # For now, we just require the default cipher string.
+ if ciphers != util.ssl_.DEFAULT_CIPHERS:
+ raise ValueError(
+ "SecureTransport doesn't support custom cipher strings"
+ )
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ # OK, we only really support cadata and cafile.
+ if capath is not None:
+ raise ValueError(
+ "SecureTransport does not support cert directories"
+ )
+
+ self._trust_bundle = cafile or cadata
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._client_cert = certfile
+ self._client_key = keyfile
+        self._client_key_passphrase = password
+
+ def wrap_socket(self, sock, server_side=False,
+ do_handshake_on_connect=True, suppress_ragged_eofs=True,
+ server_hostname=None):
+ # So, what do we do here? Firstly, we assert some properties. This is a
+ # stripped down shim, so there is some functionality we don't support.
+ # See PEP 543 for the real deal.
+ assert not server_side
+ assert do_handshake_on_connect
+ assert suppress_ragged_eofs
+
+ # Ok, we're good to go. Now we want to create the wrapped socket object
+ # and store it in the appropriate place.
+ wrapped_socket = WrappedSocket(sock)
+
+ # Now we can handshake
+ wrapped_socket.handshake(
+ server_hostname, self._verify, self._trust_bundle,
+ self._min_version, self._max_version, self._client_cert,
+ self._client_key, self._client_key_passphrase
+ )
+ return wrapped_socket
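+
+
+# Illustrative, main-guarded sketch (not part of upstream urllib3), macOS only.
+# The host name is a placeholder, and this sketch leaves verify_mode at its
+# CERT_NONE default, so no certificate validation is performed.
+if __name__ == '__main__':
+    inject_into_urllib3()
+    ctx = util.ssl_.SSLContext(ssl.PROTOCOL_SSLv23)  # SecureTransportContext
+    raw = socket.create_connection(('example.com', 443))
+    tls = ctx.wrap_socket(raw, server_hostname='example.com')
+    # Only the DER form of the peer certificate is available (see getpeercert).
+    print(len(tls.getpeercert(binary_form=True)))
+    tls.close()
+    extract_from_urllib3()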
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
new file mode 100644
index 00000000..1cb79285
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
+SOCKS5. To enable its functionality, either install PySocks or install this
+module with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4
+- SOCKS4a
+- SOCKS5
+- Usernames and passwords for the SOCKS proxy
+
+Known Limitations:
+
+- Currently PySocks does not support contacting remote websites via literal
+ IPv6 addresses. Any such connection attempt will fail. You must use a domain
+ name.
+- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
+ such connection attempt will fail.
+"""
+from __future__ import absolute_import
+
+try:
+ import socks
+except ImportError:
+ import warnings
+ from ..exceptions import DependencyWarning
+
+ warnings.warn((
+ 'SOCKS support in urllib3 requires the installation of optional '
+ 'dependencies: specifically, PySocks. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
+ ),
+ DependencyWarning
+ )
+ raise
+
+from socket import error as SocketError, timeout as SocketTimeout
+
+from ..connection import (
+ HTTPConnection, HTTPSConnection
+)
+from ..connectionpool import (
+ HTTPConnectionPool, HTTPSConnectionPool
+)
+from ..exceptions import ConnectTimeoutError, NewConnectionError
+from ..poolmanager import PoolManager
+from ..util.url import parse_url
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+
+class SOCKSConnection(HTTPConnection):
+ """
+ A plain-text HTTP connection that connects via a SOCKS proxy.
+ """
+ def __init__(self, *args, **kwargs):
+ self._socks_options = kwargs.pop('_socks_options')
+ super(SOCKSConnection, self).__init__(*args, **kwargs)
+
+ def _new_conn(self):
+ """
+ Establish a new connection via the SOCKS proxy.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw['source_address'] = self.source_address
+
+ if self.socket_options:
+ extra_kw['socket_options'] = self.socket_options
+
+ try:
+ conn = socks.create_connection(
+ (self.host, self.port),
+ proxy_type=self._socks_options['socks_version'],
+ proxy_addr=self._socks_options['proxy_host'],
+ proxy_port=self._socks_options['proxy_port'],
+ proxy_username=self._socks_options['username'],
+ proxy_password=self._socks_options['password'],
+ proxy_rdns=self._socks_options['rdns'],
+ timeout=self.timeout,
+ **extra_kw
+ )
+
+ except SocketTimeout as e:
+ raise ConnectTimeoutError(
+ self, "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout))
+
+ except socks.ProxyError as e:
+ # This is fragile as hell, but it seems to be the only way to raise
+ # useful errors here.
+ if e.socket_err:
+ error = e.socket_err
+ if isinstance(error, SocketTimeout):
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)" %
+ (self.host, self.timeout)
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % error
+ )
+ else:
+ raise NewConnectionError(
+ self,
+ "Failed to establish a new connection: %s" % e
+ )
+
+ except SocketError as e: # Defensive: PySocks should catch all these.
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e)
+
+ return conn
+
+
+# We don't need to duplicate the Verified/Unverified distinction from
+# urllib3/connection.py here because the HTTPSConnection will already have been
+# correctly set to either the Verified or Unverified form by that module. This
+# means the SOCKSHTTPSConnection will automatically be the correct type.
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
+ pass
+
+
+class SOCKSHTTPConnectionPool(HTTPConnectionPool):
+ ConnectionCls = SOCKSConnection
+
+
+class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
+ ConnectionCls = SOCKSHTTPSConnection
+
+
+class SOCKSProxyManager(PoolManager):
+ """
+ A version of the urllib3 ProxyManager that routes connections via the
+ defined SOCKS proxy.
+ """
+ pool_classes_by_scheme = {
+ 'http': SOCKSHTTPConnectionPool,
+ 'https': SOCKSHTTPSConnectionPool,
+ }
+
+ def __init__(self, proxy_url, username=None, password=None,
+ num_pools=10, headers=None, **connection_pool_kw):
+ parsed = parse_url(proxy_url)
+
+ if parsed.scheme == 'socks5':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = False
+ elif parsed.scheme == 'socks5h':
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = True
+ elif parsed.scheme == 'socks4':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = False
+ elif parsed.scheme == 'socks4a':
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = True
+ else:
+ raise ValueError(
+ "Unable to determine SOCKS version from %s" % proxy_url
+ )
+
+ self.proxy_url = proxy_url
+
+ socks_options = {
+ 'socks_version': socks_version,
+ 'proxy_host': parsed.host,
+ 'proxy_port': parsed.port,
+ 'username': username,
+ 'password': password,
+ 'rdns': rdns
+ }
+ connection_pool_kw['_socks_options'] = socks_options
+
+ super(SOCKSProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw
+ )
+
+ self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
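+
+
+# Illustrative, main-guarded sketch (not part of upstream urllib3). It assumes
+# PySocks is installed and that a SOCKS5 proxy is listening on localhost:1080,
+# which is only a placeholder address.
+if __name__ == '__main__':
+    # The 'socks5h' scheme asks the proxy to resolve host names (rdns=True).
+    proxy = SOCKSProxyManager('socks5h://localhost:1080/')
+    response = proxy.request('GET', 'http://example.com/')
+    print(response.status)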
diff --git a/collectors/python.d.plugin/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
new file mode 100644
index 00000000..a71cabe0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
@@ -0,0 +1,247 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from .packages.six.moves.http_client import (
+ IncompleteRead as httplib_IncompleteRead
+)
+# Base Exceptions
+
+
+class HTTPError(Exception):
+ "Base exception used by this module."
+ pass
+
+
+class HTTPWarning(Warning):
+ "Base warning used by this module."
+ pass
+
+
+class PoolError(HTTPError):
+ "Base exception for errors caused within a pool."
+ def __init__(self, pool, message):
+ self.pool = pool
+ HTTPError.__init__(self, "%s: %s" % (pool, message))
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, None)
+
+
+class RequestError(PoolError):
+ "Base exception for PoolErrors that have associated URLs."
+ def __init__(self, pool, url, message):
+ self.url = url
+ PoolError.__init__(self, pool, message)
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, self.url, None)
+
+
+class SSLError(HTTPError):
+ "Raised when SSL certificate fails in an HTTPS connection."
+ pass
+
+
+class ProxyError(HTTPError):
+ "Raised when the connection to a proxy fails."
+ pass
+
+
+class DecodeError(HTTPError):
+ "Raised when automatic decoding based on Content-Type fails."
+ pass
+
+
+class ProtocolError(HTTPError):
+ "Raised when something unexpected happens mid-request/response."
+ pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
+# Leaf Exceptions
+
+class MaxRetryError(RequestError):
+ """Raised when the maximum number of retries is exceeded.
+
+ :param pool: The connection pool
+ :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+    :param string url: The requested URL
+ :param exceptions.Exception reason: The underlying error
+
+ """
+
+ def __init__(self, pool, url, reason=None):
+ self.reason = reason
+
+ message = "Max retries exceeded with url: %s (Caused by %r)" % (
+ url, reason)
+
+ RequestError.__init__(self, pool, url, message)
+
+
+class HostChangedError(RequestError):
+ "Raised when an existing pool gets a request for a foreign host."
+
+ def __init__(self, pool, url, retries=3):
+ message = "Tried to open a foreign host with url: %s" % url
+ RequestError.__init__(self, pool, url, message)
+ self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+ """ Raised when passing an invalid state to a timeout """
+ pass
+
+
+class TimeoutError(HTTPError):
+ """ Raised when a socket timeout error occurs.
+
+ Catching this error will catch both :exc:`ReadTimeoutErrors
+ <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+ """
+ pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+ "Raised when a socket timeout occurs while receiving data from a server"
+ pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+ "Raised when a socket timeout occurs while connecting to a server"
+ pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+ "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
+ pass
+
+
+class EmptyPoolError(PoolError):
+ "Raised when a pool runs out of connections and no more are allowed."
+ pass
+
+
+class ClosedPoolError(PoolError):
+ "Raised when a request enters a pool after the pool has been closed."
+ pass
+
+
+class LocationValueError(ValueError, HTTPError):
+ "Raised when there is something wrong with a given URL input."
+ pass
+
+
+class LocationParseError(LocationValueError):
+ "Raised when get_host or similar fails to parse the URL input."
+
+ def __init__(self, location):
+ message = "Failed to parse: %s" % location
+ HTTPError.__init__(self, message)
+
+ self.location = location
+
+
+class ResponseError(HTTPError):
+ "Used as a container for an error reason supplied in a MaxRetryError."
+ GENERIC_ERROR = 'too many error responses'
+ SPECIFIC_ERROR = 'too many {status_code} error responses'
+
+
+class SecurityWarning(HTTPWarning):
+    "Warned when performing security-reducing actions"
+ pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+ "Warned when connecting to a host with a certificate missing a SAN."
+ pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+ "Warned when making an unverified HTTPS request."
+ pass
+
+
+class SystemTimeWarning(SecurityWarning):
+ "Warned when system time is suspected to be wrong"
+ pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+ "Warned when certain SSL configuration is not available on a platform."
+ pass
+
+
+class SNIMissingWarning(HTTPWarning):
+    "Warned when making an HTTPS request without SNI available."
+ pass
+
+
+class DependencyWarning(HTTPWarning):
+ """
+ Warned when an attempt is made to import a module with missing optional
+ dependencies.
+ """
+ pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+ "Response needs to be chunked in order to read it as chunks."
+ pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+ """
+ Body should be httplib.HTTPResponse like (have an fp attribute which
+ returns raw chunks) for read_chunked().
+ """
+ pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+ """
+ Response length doesn't match expected Content-Length
+
+ Subclass of http_client.IncompleteRead to allow int value
+ for `partial` to avoid creating large objects on streamed
+ reads.
+ """
+ def __init__(self, partial, expected):
+ super(IncompleteRead, self).__init__(partial, expected)
+
+ def __repr__(self):
+ return ('IncompleteRead(%i bytes read, '
+ '%i more expected)' % (self.partial, self.expected))
+
+
+class InvalidHeader(HTTPError):
+ "The header provided was somehow invalid."
+ pass
+
+
+class ProxySchemeUnknown(AssertionError, ValueError):
+ "ProxyManager does not support the supplied scheme"
+ # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+ def __init__(self, scheme):
+ message = "Not supported proxy scheme %s" % scheme
+ super(ProxySchemeUnknown, self).__init__(message)
+
+
+class HeaderParsingError(HTTPError):
+ "Raised by assert_header_parsing, but we convert it to a log.warning statement."
+ def __init__(self, defects, unparsed_data):
+ message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
+ super(HeaderParsingError, self).__init__(message)
+
+
+class UnrewindableBodyError(HTTPError):
+ "urllib3 encountered an error when trying to rewind a body"
+ pass
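+
+
+# Small, main-guarded sketch (not part of upstream urllib3) of how the
+# hierarchy above is used: catching the broad HTTPError also catches the
+# leaf exceptions, such as LocationParseError raised for an unparsable URL.
+if __name__ == '__main__':
+    try:
+        raise LocationParseError('http://[not-a-valid-url')
+    except HTTPError as e:
+        print('caught %r for location %s' % (e, e.location))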
diff --git a/collectors/python.d.plugin/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py
new file mode 100644
index 00000000..de7577b7
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/fields.py
@@ -0,0 +1,179 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import email.utils
+import mimetypes
+
+from .packages import six
+
+
+def guess_content_type(filename, default='application/octet-stream'):
+ """
+ Guess the "Content-Type" of a file.
+
+ :param filename:
+ The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+ :param default:
+ If no "Content-Type" can be guessed, default to `default`.
+ """
+ if filename:
+ return mimetypes.guess_type(filename)[0] or default
+ return default
+
+
+def format_header_param(name, value):
+ """
+ Helper function to format and quote a single header parameter.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows RFC 2231, as
+ suggested by RFC 2388 Section 4.4.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+ if not any(ch in value for ch in '"\\\r\n'):
+ result = '%s="%s"' % (name, value)
+ try:
+ result.encode('ascii')
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ pass
+ else:
+ return result
+ if not six.PY3 and isinstance(value, six.text_type): # Python 2:
+ value = value.encode('utf-8')
+ value = email.utils.encode_rfc2231(value, 'utf-8')
+ value = '%s*=%s' % (name, value)
+ return value
+
+
+class RequestField(object):
+ """
+ A data container for request body parameters.
+
+ :param name:
+ The name of this request field.
+ :param data:
+ The data/value body.
+ :param filename:
+ An optional filename of the request field.
+ :param headers:
+ An optional dict-like object of headers to initially use for the field.
+ """
+ def __init__(self, name, data, filename=None, headers=None):
+ self._name = name
+ self._filename = filename
+ self.data = data
+ self.headers = {}
+ if headers:
+ self.headers = dict(headers)
+
+ @classmethod
+ def from_tuples(cls, fieldname, value):
+ """
+ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+ Supports constructing :class:`~urllib3.fields.RequestField` from
+        parameters of key/value strings AND key/filetuple. A filetuple is a
+ (filename, data, MIME type) tuple where the MIME type is optional.
+ For example::
+
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+
+ Field names and filenames must be unicode.
+ """
+ if isinstance(value, tuple):
+ if len(value) == 3:
+ filename, data, content_type = value
+ else:
+ filename, data = value
+ content_type = guess_content_type(filename)
+ else:
+ filename = None
+ content_type = None
+ data = value
+
+ request_param = cls(fieldname, data, filename=filename)
+ request_param.make_multipart(content_type=content_type)
+
+ return request_param
+
+ def _render_part(self, name, value):
+ """
+ Overridable helper function to format a single header parameter.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+ return format_header_param(name, value)
+
+ def _render_parts(self, header_parts):
+ """
+ Helper function to format and quote a single header.
+
+ Useful for single headers that are composed of multiple items. E.g.,
+ 'Content-Disposition' fields.
+
+ :param header_parts:
+            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+ as `k1="v1"; k2="v2"; ...`.
+ """
+ parts = []
+ iterable = header_parts
+ if isinstance(header_parts, dict):
+ iterable = header_parts.items()
+
+ for name, value in iterable:
+ if value is not None:
+ parts.append(self._render_part(name, value))
+
+ return '; '.join(parts)
+
+ def render_headers(self):
+ """
+ Renders the headers for this request field.
+ """
+ lines = []
+
+ sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
+ for sort_key in sort_keys:
+ if self.headers.get(sort_key, False):
+ lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
+
+ for header_name, header_value in self.headers.items():
+ if header_name not in sort_keys:
+ if header_value:
+ lines.append('%s: %s' % (header_name, header_value))
+
+ lines.append('\r\n')
+ return '\r\n'.join(lines)
+
+ def make_multipart(self, content_disposition=None, content_type=None,
+ content_location=None):
+ """
+ Makes this request field into a multipart request field.
+
+ This method sets the "Content-Disposition", "Content-Type" and
+ "Content-Location" headers on this request field.
+
+ :param content_type:
+ The 'Content-Type' of the request body.
+ :param content_location:
+ The 'Content-Location' of the request body.
+
+ """
+ self.headers['Content-Disposition'] = content_disposition or 'form-data'
+ self.headers['Content-Disposition'] += '; '.join([
+ '', self._render_parts(
+ (('name', self._name), ('filename', self._filename))
+ )
+ ])
+ self.headers['Content-Type'] = content_type
+ self.headers['Content-Location'] = content_location
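+
+
+# Illustrative usage sketch (not part of upstream urllib3; the field and file
+# names below are made up): build a multipart field from an old-style tuple
+# and render its part headers.
+def _example_request_field():
+    field = RequestField.from_tuples(
+        'attachment', ('notes.txt', 'hello world', 'text/plain'))
+    # from_tuples() has already called make_multipart() with the MIME type,
+    # so the rendered headers carry Content-Disposition and Content-Type.
+    return field.render_headers()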
diff --git a/collectors/python.d.plugin/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
new file mode 100644
index 00000000..3febc9cf
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import codecs
+
+from uuid import uuid4
+from io import BytesIO
+
+from .packages import six
+from .packages.six import b
+from .fields import RequestField
+
+writer = codecs.lookup('utf-8')[3]
+
+
+def choose_boundary():
+ """
+ Our embarrassingly-simple replacement for mimetools.choose_boundary.
+ """
+ return uuid4().hex
+
+
+def iter_field_objects(fields):
+ """
+ Iterate over fields.
+
+ Supports lists of (k, v) tuples and dicts, as well as lists of
+ :class:`~urllib3.fields.RequestField` objects.
+
+ """
+ if isinstance(fields, dict):
+ i = six.iteritems(fields)
+ else:
+ i = iter(fields)
+
+ for field in i:
+ if isinstance(field, RequestField):
+ yield field
+ else:
+ yield RequestField.from_tuples(*field)
+
+
+def iter_fields(fields):
+ """
+ .. deprecated:: 1.6
+
+ Iterate over fields.
+
+ The addition of :class:`~urllib3.fields.RequestField` makes this function
+ obsolete. Instead, use :func:`iter_field_objects`, which returns
+ :class:`~urllib3.fields.RequestField` objects.
+
+ Supports list of (k, v) tuples and dicts.
+ """
+ if isinstance(fields, dict):
+ return ((k, v) for k, v in six.iteritems(fields))
+
+ return ((k, v) for k, v in fields)
+
+
+def encode_multipart_formdata(fields, boundary=None):
+ """
+ Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
+
+ :param fields:
+ Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
+
+ :param boundary:
+ If not specified, a random boundary will be generated using
+ :func:`choose_boundary` (a uuid4-based replacement for ``mimetools.choose_boundary``).
+ """
+ body = BytesIO()
+ if boundary is None:
+ boundary = choose_boundary()
+
+ for field in iter_field_objects(fields):
+ body.write(b('--%s\r\n' % (boundary)))
+
+ writer(body).write(field.render_headers())
+ data = field.data
+
+ if isinstance(data, int):
+ data = str(data) # Backwards compatibility
+
+ if isinstance(data, six.text_type):
+ writer(body).write(data)
+ else:
+ body.write(data)
+
+ body.write(b'\r\n')
+
+ body.write(b('--%s--\r\n' % (boundary)))
+
+ content_type = str('multipart/form-data; boundary=%s' % boundary)
+
+ return body.getvalue(), content_type
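+
+
+# Illustrative usage sketch (not part of upstream urllib3; field names are
+# made up): encode a plain text field plus a file-style tuple. The returned
+# body is bytes and the content type carries the generated boundary.
+def _example_encode_formdata():
+    fields = {
+        'name': 'netdata',
+        'attachment': ('notes.txt', 'hello world', 'text/plain'),
+    }
+    body, content_type = encode_multipart_formdata(fields)
+    return body, content_type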
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
new file mode 100644
index 00000000..170e974c
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import ssl_match_hostname
+
+__all__ = ('ssl_match_hostname', )
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
new file mode 100644
index 00000000..8ab122f8
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: MIT
+"""
+backports.makefile
+~~~~~~~~~~~~~~~~~~
+
+Backports the Python 3 ``socket.makefile`` method for use with anything that
+wants to create a "fake" socket object.
+"""
+import io
+
+from socket import SocketIO
+
+
+def backport_makefile(self, mode="r", buffering=None, encoding=None,
+ errors=None, newline=None):
+ """
+ Backport of ``socket.makefile`` from Python 3.5.
+ """
+ if not set(mode) <= set(["r", "w", "b"]):
+ raise ValueError(
+ "invalid mode %r (only r, w, b allowed)" % (mode,)
+ )
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = SocketIO(self, rawmode)
+ self._makefile_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
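+
+
+# Illustrative usage sketch (not part of the upstream backport): the function
+# above is meant to be bound as the ``makefile`` method of a socket-like
+# wrapper class that keeps a ``_makefile_refs`` counter. The helper below is
+# hypothetical.
+def _attach_backport_makefile(wrapper_class):
+    # Assigning the plain function is enough; Python binds it as a method.
+    wrapper_class.makefile = backport_makefile
+    return wrapper_class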
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
new file mode 100644
index 00000000..9f7c0e6b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
@@ -0,0 +1,260 @@
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+# Copyright 2009 Raymond Hettinger, released under the MIT License.
+# http://code.activestate.com/recipes/576693/
+# SPDX-License-Identifier: MIT
+try:
+ from thread import get_ident as _get_ident
+except ImportError:
+ from dummy_thread import get_ident as _get_ident
+
+try:
+ from _abcoll import KeysView, ValuesView, ItemsView
+except ImportError:
+ pass
+
+
+class OrderedDict(dict):
+ 'Dictionary that remembers insertion order'
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as for regular dictionaries.
+
+ # The internal self.__map dictionary maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # Each link is stored as a list of length three: [PREV, NEXT, KEY].
+
+ def __init__(self, *args, **kwds):
+ '''Initialize an ordered dictionary. Signature is the same as for
+ regular dictionaries, but keyword arguments are not recommended
+ because their insertion order is arbitrary.
+
+ '''
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__root = root = [] # sentinel node
+ root[:] = [root, root, None]
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link which goes at the end of the linked
+ # list, and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ root = self.__root
+ last = root[0]
+ last[1] = root[0] = self.__map[key] = [last, root, key]
+ dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which is
+ # then removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link_prev, link_next, key = self.__map.pop(key)
+ link_prev[1] = link_next
+ link_next[0] = link_prev
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ root = self.__root
+ curr = root[1]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[1]
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ root = self.__root
+ curr = root[0]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[0]
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ try:
+ for node in self.__map.itervalues():
+ del node[:]
+ root = self.__root
+ root[:] = [root, root, None]
+ self.__map.clear()
+ except AttributeError:
+ pass
+ dict.clear(self)
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ root = self.__root
+ if last:
+ link = root[0]
+ link_prev = link[0]
+ link_prev[1] = root
+ root[0] = link_prev
+ else:
+ link = root[1]
+ link_next = link[1]
+ root[1] = link_next
+ link_next[0] = root
+ key = link[2]
+ del self.__map[key]
+ value = dict.pop(self, key)
+ return key, value
+
+ # -- the following methods do not depend on the internal structure --
+
+ def keys(self):
+ 'od.keys() -> list of keys in od'
+ return list(self)
+
+ def values(self):
+ 'od.values() -> list of values in od'
+ return [self[key] for key in self]
+
+ def items(self):
+ 'od.items() -> list of (key, value) pairs in od'
+ return [(key, self[key]) for key in self]
+
+ def iterkeys(self):
+ 'od.iterkeys() -> an iterator over the keys in od'
+ return iter(self)
+
+ def itervalues(self):
+ 'od.itervalues -> an iterator over the values in od'
+ for k in self:
+ yield self[k]
+
+ def iteritems(self):
+ 'od.iteritems -> an iterator over the (key, value) items in od'
+ for k in self:
+ yield (k, self[k])
+
+ def update(*args, **kwds):
+ '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
+
+ If E is a dict instance, does: for k in E: od[k] = E[k]
+ If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
+ Or if E is an iterable of items, does: for k, v in E: od[k] = v
+ In either case, this is followed by: for k, v in F.items(): od[k] = v
+
+ '''
+ if len(args) > 2:
+ raise TypeError('update() takes at most 2 positional '
+ 'arguments (%d given)' % (len(args),))
+ elif not args:
+ raise TypeError('update() takes at least 1 argument (0 given)')
+ self = args[0]
+ # Make progressively weaker assumptions about "other"
+ other = ()
+ if len(args) == 2:
+ other = args[1]
+ if isinstance(other, dict):
+ for key in other:
+ self[key] = other[key]
+ elif hasattr(other, 'keys'):
+ for key in other.keys():
+ self[key] = other[key]
+ else:
+ for key, value in other:
+ self[key] = value
+ for key, value in kwds.items():
+ self[key] = value
+
+ __update = update # let subclasses override update without breaking __init__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+
+ '''
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ def __repr__(self, _repr_running={}):
+ 'od.__repr__() <==> repr(od)'
+ call_key = id(self), _get_ident()
+ if call_key in _repr_running:
+ return '...'
+ _repr_running[call_key] = 1
+ try:
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+ finally:
+ del _repr_running[call_key]
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ inst_dict = vars(self).copy()
+ for k in vars(OrderedDict()):
+ inst_dict.pop(k, None)
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def copy(self):
+ 'od.copy() -> a shallow copy of od'
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+ and values equal to v (which defaults to None).
+
+ '''
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
+ while comparison to a regular mapping is order-insensitive.
+
+ '''
+ if isinstance(other, OrderedDict):
+ return len(self) == len(other) and self.items() == other.items()
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ # -- the following methods are only used in Python 2.7 --
+
+ def viewkeys(self):
+ "od.viewkeys() -> a set-like object providing a view on od's keys"
+ return KeysView(self)
+
+ def viewvalues(self):
+ "od.viewvalues() -> an object providing a view on od's values"
+ return ValuesView(self)
+
+ def viewitems(self):
+ "od.viewitems() -> a set-like object providing a view on od's items"
+ return ItemsView(self)
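+
+
+# Illustrative usage sketch (not part of the backport itself): insertion
+# order is preserved, and popitem() can drop from either end of the order.
+def _example_ordered_dict():
+    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
+    newest = od.popitem()             # ('c', 3) - LIFO by default
+    oldest = od.popitem(last=False)   # ('a', 1) - FIFO when last is false
+    return od.keys(), newest, oldest  # remaining keys: ['b']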
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
new file mode 100644
index 00000000..31df5012
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
@@ -0,0 +1,852 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# SPDX-License-Identifier: MIT
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python3
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return True if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required, if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ if from_value is None:
+ raise value
+ raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ raise value from from_value
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with a different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
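+
+
+# Illustrative usage sketch (not part of upstream six): with the meta path
+# importer registered above, renamed standard library modules resolve lazily
+# through the ``moves`` namespace on either Python version.
+def _example_moves():
+    parsed = moves.urllib_parse.urlparse('http://example.com/path')
+    return moves.http_client.OK, parsed.netloc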
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
new file mode 100644
index 00000000..2aeeeff9
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: MIT
+import sys
+
+try:
+ # Our match_hostname function is the same as 3.5's, so we only want to
+ # import the match_hostname function if it's at least that good.
+ if sys.version_info < (3, 5):
+ raise ImportError("Fallback to vendored code")
+
+ from ssl import CertificateError, match_hostname
+except ImportError:
+ try:
+ # Backport of the function from a pypi module
+ from backports.ssl_match_hostname import CertificateError, match_hostname
+ except ImportError:
+ # Our vendored copy
+ from ._implementation import CertificateError, match_hostname
+
+# Not needed, but documenting what we provide.
+__all__ = ('CertificateError', 'match_hostname')
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
new file mode 100644
index 00000000..647e081d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
@@ -0,0 +1,156 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# SPDX-License-Identifier: Python-2.0
+
+import re
+import sys
+
+# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
+# system, use it to handle IPAddress ServerAltnames (this was added in
+# python-3.5) otherwise only do DNS matching. This allows
+# backports.ssl_match_hostname to continue to be used all the way back to
+# python-2.4.
+try:
+ import ipaddress
+except ImportError:
+ ipaddress = None
+
+__version__ = '3.5.0.1'
+
+
+class CertificateError(ValueError):
+ pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r'.')
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count('*')
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn))
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+ return pat.match(hostname)
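+
+
+# Illustrative usage sketch (not part of the vendored implementation): a
+# single left-most wildcard matches exactly one dotless label, so it cannot
+# span sub-subdomains.
+def _example_dnsname_match():
+    assert _dnsname_match('*.example.com', 'www.example.com')
+    assert not _dnsname_match('*.example.com', 'a.b.example.com')
+    return True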
+
+
+def _to_unicode(obj):
+ if isinstance(obj, str) and sys.version_info < (3,):
+ obj = unicode(obj, encoding='ascii', errors='strict')
+ return obj
+
+def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ # Divergence from upstream: ipaddress can't handle byte str
+ ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+ return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED")
+ try:
+ # Divergence from upstream: ipaddress can't handle byte str
+ host_ip = ipaddress.ip_address(_to_unicode(hostname))
+ except ValueError:
+ # Not an IP address (common case)
+ host_ip = None
+ except UnicodeError:
+ # Divergence from upstream: Have to deal with ipaddress not taking
+ # byte strings. addresses should be all ascii, so we consider it not
+ # an ipaddress in this case
+ host_ip = None
+ except AttributeError:
+ # Divergence from upstream: Make ipaddress library optional
+ if ipaddress is None:
+ host_ip = None
+ else:
+ raise
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == 'IP Address':
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r "
+ "doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r "
+ "doesn't match %r"
+ % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or "
+ "subjectAltName fields were found")
diff --git a/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
new file mode 100644
index 00000000..adea9bc0
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
@@ -0,0 +1,441 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import collections
+import functools
+import logging
+
+from ._collections import RecentlyUsedContainer
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from .connectionpool import port_by_scheme
+from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
+from .packages.six.moves.urllib.parse import urljoin
+from .request import RequestMethods
+from .util.url import parse_url
+from .util.retry import Retry
+
+
+__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
+
+
+log = logging.getLogger(__name__)
+
+SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
+ 'ssl_version', 'ca_cert_dir', 'ssl_context')
+
+# All known keyword arguments that could be provided to the pool manager, its
+# pools, or the underlying connections. This is used to construct a pool key.
+_key_fields = (
+ 'key_scheme', # str
+ 'key_host', # str
+ 'key_port', # int
+ 'key_timeout', # int or float or Timeout
+ 'key_retries', # int or Retry
+ 'key_strict', # bool
+ 'key_block', # bool
+ 'key_source_address', # str
+ 'key_key_file', # str
+ 'key_cert_file', # str
+ 'key_cert_reqs', # str
+ 'key_ca_certs', # str
+ 'key_ssl_version', # str
+ 'key_ca_cert_dir', # str
+ 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
+ 'key_maxsize', # int
+ 'key_headers', # dict
+ 'key__proxy', # parsed proxy url
+ 'key__proxy_headers', # dict
+ 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
+ 'key__socks_options', # dict
+ 'key_assert_hostname', # bool or string
+ 'key_assert_fingerprint', # str
+)
+
+#: The namedtuple class used to construct keys for the connection pool.
+#: All custom key schemes should include the fields in this key at a minimum.
+PoolKey = collections.namedtuple('PoolKey', _key_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+ """
+ Create a pool key out of a request context dictionary.
+
+ According to RFC 3986, both the scheme and host are case-insensitive.
+ Therefore, this function normalizes both before constructing the pool
+ key for an HTTPS request. If you wish to change this behaviour, provide
+ alternate callables to ``key_fn_by_scheme``.
+
+ :param key_class:
+ The class to use when constructing the key. This should be a namedtuple
+ with the ``scheme`` and ``host`` keys at a minimum.
+ :type key_class: namedtuple
+ :param request_context:
+ A dictionary-like object that contains the context for a request.
+ :type request_context: dict
+
+ :return: A namedtuple that can be used as a connection pool key.
+ :rtype: PoolKey
+ """
+ # Since we mutate the dictionary, make a copy first
+ context = request_context.copy()
+ context['scheme'] = context['scheme'].lower()
+ context['host'] = context['host'].lower()
+
+ # These are both dictionaries and need to be transformed into frozensets
+ for key in ('headers', '_proxy_headers', '_socks_options'):
+ if key in context and context[key] is not None:
+ context[key] = frozenset(context[key].items())
+
+ # The socket_options key may be a list and needs to be transformed into a
+ # tuple.
+ socket_opts = context.get('socket_options')
+ if socket_opts is not None:
+ context['socket_options'] = tuple(socket_opts)
+
+ # Map the kwargs to the names in the namedtuple - this is necessary since
+ # namedtuples can't have fields starting with '_'.
+ for key in list(context.keys()):
+ context['key_' + key] = context.pop(key)
+
+ # Default to ``None`` for keys missing from the context
+ for field in key_class._fields:
+ if field not in context:
+ context[field] = None
+
+ return key_class(**context)
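+
+
+# Illustrative usage sketch (not part of upstream urllib3): normalizing two
+# spellings of the same origin yields an identical, hashable PoolKey, so
+# both map to the same connection pool.
+def _example_pool_key():
+    key_a = _default_key_normalizer(
+        PoolKey, {'scheme': 'HTTP', 'host': 'Example.COM', 'port': 80})
+    key_b = _default_key_normalizer(
+        PoolKey, {'scheme': 'http', 'host': 'example.com', 'port': 80})
+    return key_a == key_b  # True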
+
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+ 'http': functools.partial(_default_key_normalizer, PoolKey),
+ 'https': functools.partial(_default_key_normalizer, PoolKey),
+}
+
+pool_classes_by_scheme = {
+ 'http': HTTPConnectionPool,
+ 'https': HTTPSConnectionPool,
+}
+
+
+class PoolManager(RequestMethods):
+ """
+ Allows for arbitrary requests while transparently keeping track of
+ necessary connection pools for you.
+
+ :param num_pools:
+ Number of connection pools to cache before discarding the least
+ recently used pool.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param \\**connection_pool_kw:
+ Additional parameters are used to create fresh
+ :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+ Example::
+
+ >>> manager = PoolManager(num_pools=2)
+ >>> r = manager.request('GET', 'http://google.com/')
+ >>> r = manager.request('GET', 'http://google.com/mail')
+ >>> r = manager.request('GET', 'http://yahoo.com/')
+ >>> len(manager.pools)
+ 2
+
+ """
+
+ proxy = None
+
+ def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+ RequestMethods.__init__(self, headers)
+ self.connection_pool_kw = connection_pool_kw
+ self.pools = RecentlyUsedContainer(num_pools,
+ dispose_func=lambda p: p.close())
+
+ # Locally set the pool classes and keys so other PoolManagers can
+ # override them.
+ self.pool_classes_by_scheme = pool_classes_by_scheme
+ self.key_fn_by_scheme = key_fn_by_scheme.copy()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.clear()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def _new_pool(self, scheme, host, port, request_context=None):
+ """
+ Create a new :class:`ConnectionPool` based on host, port, scheme, and
+ any additional pool keyword arguments.
+
+ If ``request_context`` is provided, it is passed as keyword arguments
+ to the pool class used. This method is used to actually create the
+ connection pools handed out by :meth:`connection_from_url` and
+ companion methods. It is intended to be overridden for customization.
+ """
+ pool_cls = self.pool_classes_by_scheme[scheme]
+ if request_context is None:
+ request_context = self.connection_pool_kw.copy()
+
+ # Although the context has everything necessary to create the pool,
+ # this function has historically only used the scheme, host, and port
+ # in the positional args. When an API change is acceptable these can
+ # be removed.
+ for key in ('scheme', 'host', 'port'):
+ request_context.pop(key, None)
+
+ if scheme == 'http':
+ for kw in SSL_KEYWORDS:
+ request_context.pop(kw, None)
+
+ return pool_cls(host, port, **request_context)
+
+ def clear(self):
+ """
+ Empty our store of pools and direct them all to close.
+
+ This will not affect in-flight connections, but they will not be
+ re-used after completion.
+ """
+ self.pools.clear()
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ """
+ Get a :class:`ConnectionPool` based on the host, port, and scheme.
+
+ If ``port`` isn't given, it will be derived from the ``scheme`` using
+ ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
+ provided, it is merged with the instance's ``connection_pool_kw``
+ variable and used to create the new connection pool, if one is
+ needed.
+ """
+
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ request_context = self._merge_pool_kwargs(pool_kwargs)
+ request_context['scheme'] = scheme or 'http'
+ if not port:
+ port = port_by_scheme.get(request_context['scheme'].lower(), 80)
+ request_context['port'] = port
+ request_context['host'] = host
+
+ return self.connection_from_context(request_context)
+
+ def connection_from_context(self, request_context):
+ """
+ Get a :class:`ConnectionPool` based on the request context.
+
+ ``request_context`` must at least contain the ``scheme`` key and its
+ value must be a key in ``key_fn_by_scheme`` instance variable.
+ """
+ scheme = request_context['scheme'].lower()
+ pool_key_constructor = self.key_fn_by_scheme[scheme]
+ pool_key = pool_key_constructor(request_context)
+
+ return self.connection_from_pool_key(pool_key, request_context=request_context)
+
+ def connection_from_pool_key(self, pool_key, request_context=None):
+ """
+ Get a :class:`ConnectionPool` based on the provided pool key.
+
+ ``pool_key`` should be a namedtuple that only contains immutable
+ objects. At a minimum it must have the ``scheme``, ``host``, and
+ ``port`` fields.
+ """
+ with self.pools.lock:
+ # If the scheme, host, or port doesn't match existing open
+ # connections, open a new ConnectionPool.
+ pool = self.pools.get(pool_key)
+ if pool:
+ return pool
+
+ # Make a fresh ConnectionPool of the desired type
+ scheme = request_context['scheme']
+ host = request_context['host']
+ port = request_context['port']
+ pool = self._new_pool(scheme, host, port, request_context=request_context)
+ self.pools[pool_key] = pool
+
+ return pool
+
+ def connection_from_url(self, url, pool_kwargs=None):
+ """
+ Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+ If ``pool_kwargs`` is not provided and a new pool needs to be
+ constructed, ``self.connection_pool_kw`` is used to initialize
+ the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+ is provided, it is used instead. Note that if a new pool does not
+ need to be created for the request, the provided ``pool_kwargs`` are
+ not used.
+ """
+ u = parse_url(url)
+ return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
+ pool_kwargs=pool_kwargs)
+
+ def _merge_pool_kwargs(self, override):
+ """
+ Merge a dictionary of override values for self.connection_pool_kw.
+
+ This does not modify self.connection_pool_kw and returns a new dict.
+ Any keys in the override dictionary with a value of ``None`` are
+ removed from the merged dictionary.
+ """
+ base_pool_kwargs = self.connection_pool_kw.copy()
+ if override:
+ for key, value in override.items():
+ if value is None:
+ try:
+ del base_pool_kwargs[key]
+ except KeyError:
+ pass
+ else:
+ base_pool_kwargs[key] = value
+ return base_pool_kwargs
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ """
+        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`,
+        but with custom cross-host redirect logic; only the request-uri
+        portion of the ``url`` is sent.
+
+ The given ``url`` parameter must be absolute, such that an appropriate
+ :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+ """
+ u = parse_url(url)
+ conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+ kw['assert_same_host'] = False
+ kw['redirect'] = False
+ if 'headers' not in kw:
+ kw['headers'] = self.headers
+
+ if self.proxy is not None and u.scheme == "http":
+ response = conn.urlopen(method, url, **kw)
+ else:
+ response = conn.urlopen(method, u.request_uri, **kw)
+
+ redirect_location = redirect and response.get_redirect_location()
+ if not redirect_location:
+ return response
+
+ # Support relative URLs for redirecting.
+ redirect_location = urljoin(url, redirect_location)
+
+ # RFC 7231, Section 6.4.4
+ if response.status == 303:
+ method = 'GET'
+
+ retries = kw.get('retries')
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect)
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=conn)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise
+ return response
+
+ kw['retries'] = retries
+ kw['redirect'] = redirect
+
+ log.info("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+ """
+ Behaves just like :class:`PoolManager`, but sends all requests through
+ the defined proxy, using the CONNECT method for HTTPS URLs.
+
+ :param proxy_url:
+ The URL of the proxy to be used.
+
+ :param proxy_headers:
+        A dictionary containing headers that will be sent to the proxy. For
+        plain HTTP they are sent with each request, while for HTTPS/CONNECT
+        they are sent only once. Could be used for proxy authentication.
+
+ Example:
+ >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+ >>> r1 = proxy.request('GET', 'http://google.com/')
+ >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+ >>> len(proxy.pools)
+ 1
+ >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+ >>> r4 = proxy.request('GET', 'https://twitter.com/')
+ >>> len(proxy.pools)
+ 3
+
+ """
+
+ def __init__(self, proxy_url, num_pools=10, headers=None,
+ proxy_headers=None, **connection_pool_kw):
+
+ if isinstance(proxy_url, HTTPConnectionPool):
+ proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
+ proxy_url.port)
+ proxy = parse_url(proxy_url)
+ if not proxy.port:
+ port = port_by_scheme.get(proxy.scheme, 80)
+ proxy = proxy._replace(port=port)
+
+ if proxy.scheme not in ("http", "https"):
+ raise ProxySchemeUnknown(proxy.scheme)
+
+ self.proxy = proxy
+ self.proxy_headers = proxy_headers or {}
+
+ connection_pool_kw['_proxy'] = self.proxy
+ connection_pool_kw['_proxy_headers'] = self.proxy_headers
+
+ super(ProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw)
+
+ def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+ if scheme == "https":
+ return super(ProxyManager, self).connection_from_host(
+ host, port, scheme, pool_kwargs=pool_kwargs)
+
+ return super(ProxyManager, self).connection_from_host(
+ self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
+
+ def _set_proxy_headers(self, url, headers=None):
+ """
+ Sets headers needed by proxies: specifically, the Accept and Host
+ headers. Only sets headers not provided by the user.
+ """
+ headers_ = {'Accept': '*/*'}
+
+ netloc = parse_url(url).netloc
+ if netloc:
+ headers_['Host'] = netloc
+
+ if headers:
+ headers_.update(headers)
+ return headers_
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
+ u = parse_url(url)
+
+ if u.scheme == "http":
+ # For proxied HTTPS requests, httplib sets the necessary headers
+ # on the CONNECT to the proxy. For HTTP, we'll definitely
+ # need to set 'Host' at the very least.
+ headers = kw.get('headers', self.headers)
+ kw['headers'] = self._set_proxy_headers(url, headers)
+
+ return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+ return ProxyManager(proxy_url=url, **kw)
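+
+# Editor's note: the sketch below is not part of upstream urllib3; it only
+# illustrates how the classes defined in this module are meant to be used,
+# assuming the vendored package is importable as ``urllib3``. The host and
+# proxy URLs are placeholders.
+#
+#     import urllib3
+#
+#     # One PoolManager re-uses a ConnectionPool per (scheme, host, port) key.
+#     http = urllib3.PoolManager(num_pools=10, headers={'User-Agent': 'example'})
+#     r = http.request('GET', 'http://example.com/')
+#
+#     # ProxyManager routes plain-HTTP requests through the proxy and uses
+#     # CONNECT tunnels for HTTPS targets.
+#     proxied = urllib3.proxy_from_url('http://localhost:3128/')
+#     r2 = proxied.request('GET', 'http://example.com/')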
diff --git a/collectors/python.d.plugin/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py
new file mode 100644
index 00000000..f7833197
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/request.py
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+
+__all__ = ['RequestMethods']
+
+
+class RequestMethods(object):
+ """
+ Convenience mixin for classes who implement a :meth:`urlopen` method, such
+ as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
+ :class:`~urllib3.poolmanager.PoolManager`.
+
+ Provides behavior for making common types of HTTP request methods and
+ decides which type of request field encoding to use.
+
+ Specifically,
+
+ :meth:`.request_encode_url` is for sending requests whose fields are
+ encoded in the URL (such as GET, HEAD, DELETE).
+
+ :meth:`.request_encode_body` is for sending requests whose fields are
+ encoded in the *body* of the request using multipart or www-form-urlencoded
+ (such as for POST, PUT, PATCH).
+
+    :meth:`.request` is for making any kind of request; it will look up the
+    appropriate encoding format and use one of the above two methods to make
+    the request.
+
+ Initializer parameters:
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+ """
+
+ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
+
+ def __init__(self, headers=None):
+ self.headers = headers or {}
+
+ def urlopen(self, method, url, body=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **kw): # Abstract
+        raise NotImplementedError("Classes extending RequestMethods must implement "
+                                  "their own ``urlopen`` method.")
+
+ def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the appropriate encoding of
+ ``fields`` based on the ``method`` used.
+
+ This is a convenience method that requires the least amount of manual
+ effort. It can be used in most situations, while still having the
+ option to drop down to more specific methods when necessary, such as
+ :meth:`request_encode_url`, :meth:`request_encode_body`,
+ or even the lowest level :meth:`urlopen`.
+ """
+ method = method.upper()
+
+ if method in self._encode_url_methods:
+ return self.request_encode_url(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
+ else:
+ return self.request_encode_body(method, url, fields=fields,
+ headers=headers,
+ **urlopen_kw)
+
+ def request_encode_url(self, method, url, fields=None, headers=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the url. This is useful for request methods like GET, HEAD, DELETE, etc.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': headers}
+ extra_kw.update(urlopen_kw)
+
+ if fields:
+ url += '?' + urlencode(fields)
+
+ return self.urlopen(method, url, **extra_kw)
+
+ def request_encode_body(self, method, url, fields=None, headers=None,
+ encode_multipart=True, multipart_boundary=None,
+ **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the body. This is useful for request methods like POST, PUT, PATCH, etc.
+
+ When ``encode_multipart=True`` (default), then
+ :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
+ the payload with the appropriate content type. Otherwise
+ :meth:`urllib.urlencode` is used with the
+ 'application/x-www-form-urlencoded' content type.
+
+ Multipart encoding must be used when posting files, and it's reasonably
+        safe to use it at other times too. However, it may break request
+ signing, such as with OAuth.
+
+ Supports an optional ``fields`` parameter of key/value strings AND
+ key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+ the MIME type is optional. For example::
+
+ fields = {
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(),
+ 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+ }
+
+ When uploading a file, providing a filename (the first parameter of the
+        tuple) is optional but recommended to best mimic the behavior of browsers.
+
+ Note that if ``headers`` are supplied, the 'Content-Type' header will
+ be overwritten because it depends on the dynamic random boundary string
+ which is used to compose the body of the request. The random boundary
+ string can be explicitly set with the ``multipart_boundary`` parameter.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {'headers': {}}
+
+ if fields:
+ if 'body' in urlopen_kw:
+ raise TypeError(
+ "request got values for both 'fields' and 'body', can only specify one.")
+
+ if encode_multipart:
+ body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
+ else:
+ body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
+
+ extra_kw['body'] = body
+ extra_kw['headers'] = {'Content-Type': content_type}
+
+ extra_kw['headers'].update(headers)
+ extra_kw.update(urlopen_kw)
+
+ return self.urlopen(method, url, **extra_kw)
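+
+# Editor's note: an illustrative sketch, not part of upstream urllib3. It shows
+# how ``request()`` picks the field encoding: URL-encoding for GET-like verbs,
+# body encoding (multipart by default) for POST-like verbs. PoolManager is one
+# concrete subclass of RequestMethods (assuming the vendored package imports as
+# ``urllib3``); the URLs are placeholders.
+#
+#     from urllib3 import PoolManager
+#
+#     http = PoolManager()
+#     # GET is in _encode_url_methods, so fields end up in the query string.
+#     http.request('GET', 'http://example.com/search', fields={'q': 'netdata'})
+#     # POST is not, so fields are encoded into a multipart/form-data body.
+#     http.request('POST', 'http://example.com/upload',
+#                  fields={'file': ('report.txt', 'contents', 'text/plain')})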
diff --git a/collectors/python.d.plugin/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py
new file mode 100644
index 00000000..cf14a307
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/response.py
@@ -0,0 +1,623 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from contextlib import contextmanager
+import zlib
+import io
+import logging
+from socket import timeout as SocketTimeout
+from socket import error as SocketError
+
+from ._collections import HTTPHeaderDict
+from .exceptions import (
+ BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
+ ResponseNotChunked, IncompleteRead, InvalidHeader
+)
+from .packages.six import string_types as basestring, binary_type, PY3
+from .packages.six.moves import http_client as httplib
+from .connection import HTTPException, BaseSSLError
+from .util.response import is_fp_closed, is_response_to_head
+
+log = logging.getLogger(__name__)
+
+
+class DeflateDecoder(object):
+
+ def __init__(self):
+ self._first_try = True
+ self._data = binary_type()
+ self._obj = zlib.decompressobj()
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+
+ if not self._first_try:
+ return self._obj.decompress(data)
+
+ self._data += data
+ try:
+ decompressed = self._obj.decompress(data)
+ if decompressed:
+ self._first_try = False
+ self._data = None
+ return decompressed
+ except zlib.error:
+ self._first_try = False
+ self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
+ try:
+ return self.decompress(self._data)
+ finally:
+ self._data = None
+
+
+class GzipDecoder(object):
+
+ def __init__(self):
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+ return self._obj.decompress(data)
+
+
+def _get_decoder(mode):
+ if mode == 'gzip':
+ return GzipDecoder()
+
+ return DeflateDecoder()
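+
+# Editor's note: illustrative sketch, not part of upstream urllib3. It shows why
+# DeflateDecoder keeps the first chunk around: some servers send raw DEFLATE
+# streams instead of zlib-wrapped ones, and the first decompress() attempt is
+# retried with a negative window size when the zlib header is missing.
+#
+#     import zlib
+#     raw_deflate = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
+#     payload = raw_deflate.compress(b'hello') + raw_deflate.flush()
+#     _get_decoder('deflate').decompress(payload)   # -> b'hello'
+#     _get_decoder('gzip')                          # -> GzipDecoder instance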
+
+
+class HTTPResponse(io.IOBase):
+ """
+ HTTP Response container.
+
+    Backwards-compatible with httplib's HTTPResponse, but the response ``body`` is
+ loaded and decoded on-demand when the ``data`` property is accessed. This
+ class is also compatible with the Python standard library's :mod:`io`
+ module, and can hence be treated as a readable object in the context of that
+ framework.
+
+ Extra parameters for behaviour not present in httplib.HTTPResponse:
+
+ :param preload_content:
+ If True, the response's body will be preloaded during construction.
+
+ :param decode_content:
+        If True, will attempt to decode the body based on the
+        'content-encoding' header (such as 'gzip' and 'deflate'). If False,
+        the raw, undecoded data is returned instead.
+
+ :param original_response:
+ When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
+ object, it's convenient to include the original for debug purposes. It's
+ otherwise unused.
+
+ :param retries:
+ The retries contains the last :class:`~urllib3.util.retry.Retry` that
+ was used during the request.
+
+ :param enforce_content_length:
+ Enforce content length checking. Body returned by server must match
+ value of Content-Length header, if present. Otherwise, raise error.
+ """
+
+ CONTENT_DECODERS = ['gzip', 'deflate']
+ REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+ def __init__(self, body='', headers=None, status=0, version=0, reason=None,
+ strict=0, preload_content=True, decode_content=True,
+ original_response=None, pool=None, connection=None,
+ retries=None, enforce_content_length=False, request_method=None):
+
+ if isinstance(headers, HTTPHeaderDict):
+ self.headers = headers
+ else:
+ self.headers = HTTPHeaderDict(headers)
+ self.status = status
+ self.version = version
+ self.reason = reason
+ self.strict = strict
+ self.decode_content = decode_content
+ self.retries = retries
+ self.enforce_content_length = enforce_content_length
+
+ self._decoder = None
+ self._body = None
+ self._fp = None
+ self._original_response = original_response
+ self._fp_bytes_read = 0
+
+ if body and isinstance(body, (basestring, binary_type)):
+ self._body = body
+
+ self._pool = pool
+ self._connection = connection
+
+ if hasattr(body, 'read'):
+ self._fp = body
+
+ # Are we using the chunked-style of transfer encoding?
+ self.chunked = False
+ self.chunk_left = None
+ tr_enc = self.headers.get('transfer-encoding', '').lower()
+ # Don't incur the penalty of creating a list and then discarding it
+ encodings = (enc.strip() for enc in tr_enc.split(","))
+ if "chunked" in encodings:
+ self.chunked = True
+
+ # Determine length of response
+ self.length_remaining = self._init_length(request_method)
+
+ # If requested, preload the body.
+ if preload_content and not self._body:
+ self._body = self.read(decode_content=decode_content)
+
+ def get_redirect_location(self):
+ """
+ Should we redirect and where to?
+
+ :returns: Truthy redirect location string if we got a redirect status
+ code and valid location. ``None`` if redirect status and no
+ location. ``False`` if not a redirect status code.
+ """
+ if self.status in self.REDIRECT_STATUSES:
+ return self.headers.get('location')
+
+ return False
+
+ def release_conn(self):
+ if not self._pool or not self._connection:
+ return
+
+ self._pool._put_conn(self._connection)
+ self._connection = None
+
+ @property
+ def data(self):
+        # For backwards-compat with urllib3 0.4 and earlier.
+ if self._body:
+ return self._body
+
+ if self._fp:
+ return self.read(cache_content=True)
+
+ @property
+ def connection(self):
+ return self._connection
+
+ def tell(self):
+ """
+ Obtain the number of bytes pulled over the wire so far. May differ from
+        the amount of content returned by :meth:`HTTPResponse.read` if bytes
+        are encoded on the wire (e.g., compressed).
+ """
+ return self._fp_bytes_read
+
+ def _init_length(self, request_method):
+ """
+ Set initial length value for Response content if available.
+ """
+ length = self.headers.get('content-length')
+
+ if length is not None and self.chunked:
+ # This Response will fail with an IncompleteRead if it can't be
+ # received as chunked. This method falls back to attempt reading
+ # the response before raising an exception.
+ log.warning("Received response with both Content-Length and "
+ "Transfer-Encoding set. This is expressly forbidden "
+ "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
+ "attempting to process response as Transfer-Encoding: "
+ "chunked.")
+ return None
+
+ elif length is not None:
+ try:
+ # RFC 7230 section 3.3.2 specifies multiple content lengths can
+ # be sent in a single Content-Length header
+ # (e.g. Content-Length: 42, 42). This line ensures the values
+ # are all valid ints and that as long as the `set` length is 1,
+ # all values are the same. Otherwise, the header is invalid.
+ lengths = set([int(val) for val in length.split(',')])
+ if len(lengths) > 1:
+ raise InvalidHeader("Content-Length contained multiple "
+ "unmatching values (%s)" % length)
+ length = lengths.pop()
+ except ValueError:
+ length = None
+ else:
+ if length < 0:
+ length = None
+
+ # Convert status to int for comparison
+ # In some cases, httplib returns a status of "_UNKNOWN"
+ try:
+ status = int(self.status)
+ except ValueError:
+ status = 0
+
+ # Check for responses that shouldn't include a body
+ if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
+ length = 0
+
+ return length
+
+ def _init_decoder(self):
+ """
+ Set-up the _decoder attribute if necessary.
+ """
+ # Note: content-encoding value should be case-insensitive, per RFC 7230
+ # Section 3.2
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
+ self._decoder = _get_decoder(content_encoding)
+
+ def _decode(self, data, decode_content, flush_decoder):
+ """
+ Decode the data passed in and potentially flush the decoder.
+ """
+ try:
+ if decode_content and self._decoder:
+ data = self._decoder.decompress(data)
+ except (IOError, zlib.error) as e:
+ content_encoding = self.headers.get('content-encoding', '').lower()
+ raise DecodeError(
+ "Received response with content-encoding: %s, but "
+ "failed to decode it." % content_encoding, e)
+
+ if flush_decoder and decode_content:
+ data += self._flush_decoder()
+
+ return data
+
+ def _flush_decoder(self):
+ """
+ Flushes the decoder. Should only be called if the decoder is actually
+ being used.
+ """
+ if self._decoder:
+ buf = self._decoder.decompress(b'')
+ return buf + self._decoder.flush()
+
+ return b''
+
+ @contextmanager
+ def _error_catcher(self):
+ """
+ Catch low-level python exceptions, instead re-raising urllib3
+ variants, so that low-level exceptions are not leaked in the
+ high-level api.
+
+ On exit, release the connection back to the pool.
+ """
+ clean_exit = False
+
+ try:
+ try:
+ yield
+
+ except SocketTimeout:
+ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+ # there is yet no clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except BaseSSLError as e:
+ # FIXME: Is there a better way to differentiate between SSLErrors?
+ if 'read operation timed out' not in str(e): # Defensive:
+ # This shouldn't happen but just in case we're missing an edge
+ # case, let's avoid swallowing SSL errors.
+ raise
+
+ raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+ except (HTTPException, SocketError) as e:
+ # This includes IncompleteRead.
+ raise ProtocolError('Connection broken: %r' % e, e)
+
+ # If no exception is thrown, we should avoid cleaning up
+ # unnecessarily.
+ clean_exit = True
+ finally:
+ # If we didn't terminate cleanly, we need to throw away our
+ # connection.
+ if not clean_exit:
+ # The response may not be closed but we're not going to use it
+ # anymore so close it now to ensure that the connection is
+ # released back to the pool.
+ if self._original_response:
+ self._original_response.close()
+
+ # Closing the response may not actually be sufficient to close
+ # everything, so if we have a hold of the connection close that
+ # too.
+ if self._connection:
+ self._connection.close()
+
+ # If we hold the original response but it's closed now, we should
+ # return the connection back to the pool.
+ if self._original_response and self._original_response.isclosed():
+ self.release_conn()
+
+ def read(self, amt=None, decode_content=None, cache_content=False):
+ """
+ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
+ parameters: ``decode_content`` and ``cache_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param cache_content:
+ If True, will save the returned data such that the same result is
+            returned regardless of the state of the underlying file object. This
+ is useful if you want the ``.data`` property to continue working
+ after having ``.read()`` the file object. (Overridden if ``amt`` is
+ set.)
+ """
+ self._init_decoder()
+ if decode_content is None:
+ decode_content = self.decode_content
+
+ if self._fp is None:
+ return
+
+ flush_decoder = False
+ data = None
+
+ with self._error_catcher():
+ if amt is None:
+ # cStringIO doesn't like amt=None
+ data = self._fp.read()
+ flush_decoder = True
+ else:
+ cache_content = False
+ data = self._fp.read(amt)
+ if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
+ # Close the connection when no data is returned
+ #
+ # This is redundant to what httplib/http.client _should_
+ # already do. However, versions of python released before
+ # December 15, 2012 (http://bugs.python.org/issue16298) do
+ # not properly close the connection in all cases. There is
+ # no harm in redundantly calling close.
+ self._fp.close()
+ flush_decoder = True
+ if self.enforce_content_length and self.length_remaining not in (0, None):
+ # This is an edge case that httplib failed to cover due
+ # to concerns of backward compatibility. We're
+ # addressing it here to make sure IncompleteRead is
+ # raised during streaming, so all calls with incorrect
+ # Content-Length are caught.
+ raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
+
+ if data:
+ self._fp_bytes_read += len(data)
+ if self.length_remaining is not None:
+ self.length_remaining -= len(data)
+
+ data = self._decode(data, decode_content, flush_decoder)
+
+ if cache_content:
+ self._body = data
+
+ return data
+
+ def stream(self, amt=2**16, decode_content=None):
+ """
+ A generator wrapper for the read() method. A call will block until
+ ``amt`` bytes have been read from the connection or until the
+ connection is closed.
+
+ :param amt:
+            How much of the content to read. The generator will return up to
+            this much data per iteration, but may return less. This is particularly
+ likely when using compressed data. However, the empty string will
+ never be returned.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ if self.chunked and self.supports_chunked_reads():
+ for line in self.read_chunked(amt, decode_content=decode_content):
+ yield line
+ else:
+ while not is_fp_closed(self._fp):
+ data = self.read(amt=amt, decode_content=decode_content)
+
+ if data:
+ yield data
+
+ @classmethod
+ def from_httplib(ResponseCls, r, **response_kw):
+ """
+ Given an :class:`httplib.HTTPResponse` instance ``r``, return a
+ corresponding :class:`urllib3.response.HTTPResponse` object.
+
+ Remaining parameters are passed to the HTTPResponse constructor, along
+ with ``original_response=r``.
+ """
+ headers = r.msg
+
+ if not isinstance(headers, HTTPHeaderDict):
+ if PY3: # Python 3
+ headers = HTTPHeaderDict(headers.items())
+ else: # Python 2
+ headers = HTTPHeaderDict.from_httplib(headers)
+
+ # HTTPResponse objects in Python 3 don't have a .strict attribute
+ strict = getattr(r, 'strict', 0)
+ resp = ResponseCls(body=r,
+ headers=headers,
+ status=r.status,
+ version=r.version,
+ reason=r.reason,
+ strict=strict,
+ original_response=r,
+ **response_kw)
+ return resp
+
+ # Backwards-compatibility methods for httplib.HTTPResponse
+ def getheaders(self):
+ return self.headers
+
+ def getheader(self, name, default=None):
+ return self.headers.get(name, default)
+
+ # Overrides from io.IOBase
+ def close(self):
+ if not self.closed:
+ self._fp.close()
+
+ if self._connection:
+ self._connection.close()
+
+ @property
+ def closed(self):
+ if self._fp is None:
+ return True
+ elif hasattr(self._fp, 'isclosed'):
+ return self._fp.isclosed()
+ elif hasattr(self._fp, 'closed'):
+ return self._fp.closed
+ else:
+ return True
+
+ def fileno(self):
+ if self._fp is None:
+ raise IOError("HTTPResponse has no file to get a fileno from")
+ elif hasattr(self._fp, "fileno"):
+ return self._fp.fileno()
+ else:
+ raise IOError("The file-like object this HTTPResponse is wrapped "
+ "around has no file descriptor")
+
+ def flush(self):
+ if self._fp is not None and hasattr(self._fp, 'flush'):
+ return self._fp.flush()
+
+ def readable(self):
+ # This method is required for `io` module compatibility.
+ return True
+
+ def readinto(self, b):
+ # This method is required for `io` module compatibility.
+ temp = self.read(len(b))
+ if len(temp) == 0:
+ return 0
+ else:
+ b[:len(temp)] = temp
+ return len(temp)
+
+ def supports_chunked_reads(self):
+ """
+ Checks if the underlying file-like object looks like a
+ httplib.HTTPResponse object. We do this by testing for the fp
+ attribute. If it is present we assume it returns raw chunks as
+ processed by read_chunked().
+ """
+ return hasattr(self._fp, 'fp')
+
+ def _update_chunk_length(self):
+ # First, we'll figure out length of a chunk and then
+ # we'll try to read it from socket.
+ if self.chunk_left is not None:
+ return
+ line = self._fp.fp.readline()
+ line = line.split(b';', 1)[0]
+ try:
+ self.chunk_left = int(line, 16)
+ except ValueError:
+ # Invalid chunked protocol response, abort.
+ self.close()
+ raise httplib.IncompleteRead(line)
+
+ def _handle_chunk(self, amt):
+ returned_chunk = None
+ if amt is None:
+ chunk = self._fp._safe_read(self.chunk_left)
+ returned_chunk = chunk
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ elif amt < self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self.chunk_left = self.chunk_left - amt
+ returned_chunk = value
+ elif amt == self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ returned_chunk = value
+ else: # amt > self.chunk_left
+ returned_chunk = self._fp._safe_read(self.chunk_left)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ return returned_chunk
+
+ def read_chunked(self, amt=None, decode_content=None):
+ """
+ Similar to :meth:`HTTPResponse.read`, but with an additional
+ parameter: ``decode_content``.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ self._init_decoder()
+ # FIXME: Rewrite this method and make it a class with a better structured logic.
+ if not self.chunked:
+ raise ResponseNotChunked(
+ "Response is not chunked. "
+ "Header 'transfer-encoding: chunked' is missing.")
+ if not self.supports_chunked_reads():
+ raise BodyNotHttplibCompatible(
+ "Body should be httplib.HTTPResponse like. "
+ "It should have have an fp attribute which returns raw chunks.")
+
+ # Don't bother reading the body of a HEAD request.
+ if self._original_response and is_response_to_head(self._original_response):
+ self._original_response.close()
+ return
+
+ with self._error_catcher():
+ while True:
+ self._update_chunk_length()
+ if self.chunk_left == 0:
+ break
+ chunk = self._handle_chunk(amt)
+ decoded = self._decode(chunk, decode_content=decode_content,
+ flush_decoder=False)
+ if decoded:
+ yield decoded
+
+ if decode_content:
+ # On CPython and PyPy, we should never need to flush the
+ # decoder. However, on Jython we *might* need to, so
+ # lets defensively do it anyway.
+ decoded = self._flush_decoder()
+ if decoded: # Platform-specific: Jython.
+ yield decoded
+
+ # Chunk content ends with \r\n: discard it.
+ while True:
+ line = self._fp.fp.readline()
+ if not line:
+ # Some sites may not end with '\r\n'.
+ break
+ if line == b'\r\n':
+ break
+
+ # We read everything; close the "file".
+ if self._original_response:
+ self._original_response.close()
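+
+# Editor's note: illustrative sketch, not part of upstream urllib3. With
+# ``preload_content=False`` the body is not read during construction, so it can
+# be streamed in decoded chunks and the connection released afterwards
+# (assuming the vendored package imports as ``urllib3``; the URL is a
+# placeholder).
+#
+#     from urllib3 import PoolManager
+#
+#     http = PoolManager()
+#     resp = http.request('GET', 'http://example.com/big-file',
+#                         preload_content=False)
+#     for chunk in resp.stream(amt=2 ** 16, decode_content=True):
+#         pass  # process each decoded chunk here
+#     resp.release_conn()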
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
new file mode 100644
index 00000000..bba628d9
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import make_headers
+from .response import is_fp_closed
+from .ssl_ import (
+ SSLContext,
+ HAS_SNI,
+ IS_PYOPENSSL,
+ IS_SECURETRANSPORT,
+ assert_fingerprint,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .timeout import (
+ current_time,
+ Timeout,
+)
+
+from .retry import Retry
+from .url import (
+ get_host,
+ parse_url,
+ split_first,
+ Url,
+)
+from .wait import (
+ wait_for_read,
+ wait_for_write
+)
+
+__all__ = (
+ 'HAS_SNI',
+ 'IS_PYOPENSSL',
+ 'IS_SECURETRANSPORT',
+ 'SSLContext',
+ 'Retry',
+ 'Timeout',
+ 'Url',
+ 'assert_fingerprint',
+ 'current_time',
+ 'is_connection_dropped',
+ 'is_fp_closed',
+ 'get_host',
+ 'parse_url',
+ 'make_headers',
+ 'resolve_cert_reqs',
+ 'resolve_ssl_version',
+ 'split_first',
+ 'ssl_wrap_socket',
+ 'wait_for_read',
+ 'wait_for_write'
+)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
new file mode 100644
index 00000000..3bd69e8f
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import socket
+from .wait import wait_for_read
+from .selectors import HAS_SELECT, SelectorError
+
+
+def is_connection_dropped(conn): # Platform-specific
+ """
+ Returns True if the connection is dropped and should be closed.
+
+ :param conn:
+ :class:`httplib.HTTPConnection` object.
+
+ Note: For platforms like AppEngine, this will always return ``False`` to
+ let the platform handle connection recycling transparently for us.
+ """
+ sock = getattr(conn, 'sock', False)
+ if sock is False: # Platform-specific: AppEngine
+ return False
+ if sock is None: # Connection already closed (such as by httplib).
+ return True
+
+ if not HAS_SELECT:
+ return False
+
+ try:
+ return bool(wait_for_read(sock, timeout=0.0))
+ except SelectorError:
+ return True
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. Added to its signature is only `socket_options`.
+# One additional modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
+def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None, socket_options=None):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`getdefaulttimeout`
+ is used. If *source_address* is set it must be a tuple of (host, port)
+ for the socket to bind as a source address before making the connection.
+    A host of '' or port 0 tells the OS to use the default.
+ """
+
+ host, port = address
+ if host.startswith('['):
+ host = host.strip('[]')
+ err = None
+
+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+ # The original create_connection function always returns all records.
+ family = allowed_gai_family()
+
+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+
+ # If provided, set socket level options before connecting.
+ _set_socket_options(sock, socket_options)
+
+ if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except socket.error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+ sock = None
+
+ if err is not None:
+ raise err
+
+ raise socket.error("getaddrinfo returns an empty list")
+
+
+def _set_socket_options(sock, options):
+ if options is None:
+ return
+
+ for opt in options:
+ sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+ """This function is designed to work in the context of
+ getaddrinfo, where family=socket.AF_UNSPEC is the default and
+ will perform a DNS search for both IPv6 and IPv4 records."""
+
+ family = socket.AF_INET
+ if HAS_IPV6:
+ family = socket.AF_UNSPEC
+ return family
+
+
+def _has_ipv6(host):
+ """ Returns True if the system can bind an IPv6 address. """
+ sock = None
+ has_ipv6 = False
+
+ if socket.has_ipv6:
+ # has_ipv6 returns true if cPython was compiled with IPv6 support.
+ # It does not tell us if the system has IPv6 support enabled. To
+ # determine that we must bind to an IPv6 address.
+ # https://github.com/shazow/urllib3/pull/611
+ # https://bugs.python.org/issue658327
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind((host, 0))
+ has_ipv6 = True
+ except Exception:
+ pass
+
+ if sock:
+ sock.close()
+ return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6('::1')
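+
+# Editor's note: illustrative sketch, not part of upstream urllib3. It shows how
+# create_connection() is typically called with per-socket options; host, port,
+# and the request bytes are placeholders.
+#
+#     import socket
+#
+#     sock = create_connection(('example.com', 80), timeout=3.0,
+#                              socket_options=[(socket.IPPROTO_TCP,
+#                                               socket.TCP_NODELAY, 1)])
+#     sock.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
+#     sock.close()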
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
new file mode 100644
index 00000000..18f27b03
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from base64 import b64encode
+
+from ..packages.six import b, integer_types
+from ..exceptions import UnrewindableBodyError
+
+ACCEPT_ENCODING = 'gzip,deflate'
+_FAILEDTELL = object()
+
+
+def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
+ basic_auth=None, proxy_basic_auth=None, disable_cache=None):
+ """
+ Shortcuts for generating request headers.
+
+ :param keep_alive:
+ If ``True``, adds 'connection: keep-alive' header.
+
+ :param accept_encoding:
+ Can be a boolean, list, or string.
+ ``True`` translates to 'gzip,deflate'.
+ List will get joined by comma.
+ String will be used as provided.
+
+ :param user_agent:
+ String representing the user-agent you want, such as
+ "python-urllib3/0.6"
+
+ :param basic_auth:
+ Colon-separated username:password string for 'authorization: basic ...'
+ auth header.
+
+ :param proxy_basic_auth:
+ Colon-separated username:password string for 'proxy-authorization: basic ...'
+ auth header.
+
+ :param disable_cache:
+ If ``True``, adds 'cache-control: no-cache' header.
+
+ Example::
+
+ >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+ {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+ >>> make_headers(accept_encoding=True)
+ {'accept-encoding': 'gzip,deflate'}
+ """
+ headers = {}
+ if accept_encoding:
+ if isinstance(accept_encoding, str):
+ pass
+ elif isinstance(accept_encoding, list):
+ accept_encoding = ','.join(accept_encoding)
+ else:
+ accept_encoding = ACCEPT_ENCODING
+ headers['accept-encoding'] = accept_encoding
+
+ if user_agent:
+ headers['user-agent'] = user_agent
+
+ if keep_alive:
+ headers['connection'] = 'keep-alive'
+
+ if basic_auth:
+ headers['authorization'] = 'Basic ' + \
+ b64encode(b(basic_auth)).decode('utf-8')
+
+ if proxy_basic_auth:
+ headers['proxy-authorization'] = 'Basic ' + \
+ b64encode(b(proxy_basic_auth)).decode('utf-8')
+
+ if disable_cache:
+ headers['cache-control'] = 'no-cache'
+
+ return headers
+
+
+def set_file_position(body, pos):
+ """
+ If a position is provided, move file to that point.
+ Otherwise, we'll attempt to record a position for future use.
+ """
+ if pos is not None:
+ rewind_body(body, pos)
+ elif getattr(body, 'tell', None) is not None:
+ try:
+ pos = body.tell()
+ except (IOError, OSError):
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body.
+ pos = _FAILEDTELL
+
+ return pos
+
+
+def rewind_body(body, body_pos):
+ """
+ Attempt to rewind body to a certain position.
+ Primarily used for request redirects and retries.
+
+ :param body:
+ File-like object that supports seek.
+
+ :param int pos:
+ Position to seek to in file.
+ """
+ body_seek = getattr(body, 'seek', None)
+ if body_seek is not None and isinstance(body_pos, integer_types):
+ try:
+ body_seek(body_pos)
+ except (IOError, OSError):
+ raise UnrewindableBodyError("An error occurred when rewinding request "
+ "body for redirect/retry.")
+ elif body_pos is _FAILEDTELL:
+ raise UnrewindableBodyError("Unable to record file position for rewinding "
+ "request body during a redirect/retry.")
+ else:
+ raise ValueError("body_pos must be of type integer, "
+ "instead it was %s." % type(body_pos))
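+
+# Editor's note: illustrative sketch, not part of upstream urllib3. It shows the
+# intended pairing of set_file_position() and rewind_body() when a file-like
+# request body must be resent after a redirect or retry; the path is a
+# placeholder.
+#
+#     body = open('/tmp/upload.bin', 'rb')
+#     body_pos = set_file_position(body, pos=None)  # remember the starting offset
+#     body.read()                                   # first attempt consumes the body
+#     rewind_body(body, body_pos)                   # seek back before retrying
+#
+#     make_headers(keep_alive=True, basic_auth='user:passwd')
+#     # -> {'connection': 'keep-alive', 'authorization': 'Basic dXNlcjpwYXNzd2Q='}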
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
new file mode 100644
index 00000000..e4cda93d
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from ..packages.six.moves import http_client as httplib
+
+from ..exceptions import HeaderParsingError
+
+
+def is_fp_closed(obj):
+ """
+ Checks whether a given file-like object is closed.
+
+ :param obj:
+ The file-like object to check.
+ """
+
+ try:
+ # Check `isclosed()` first, in case Python3 doesn't set `closed`.
+ # GH Issue #928
+ return obj.isclosed()
+ except AttributeError:
+ pass
+
+ try:
+ # Check via the official file-like-object way.
+ return obj.closed
+ except AttributeError:
+ pass
+
+ try:
+ # Check if the object is a container for another file-like object that
+ # gets released on exhaustion (e.g. HTTPResponse).
+ return obj.fp is None
+ except AttributeError:
+ pass
+
+ raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+ """
+ Asserts whether all headers have been successfully parsed.
+ Extracts encountered errors from the result of parsing headers.
+
+ Only works on Python 3.
+
+ :param headers: Headers to verify.
+ :type headers: `httplib.HTTPMessage`.
+
+ :raises urllib3.exceptions.HeaderParsingError:
+ If parsing errors are found.
+ """
+
+ # This will fail silently if we pass in the wrong kind of parameter.
+ # To make debugging easier add an explicit check.
+ if not isinstance(headers, httplib.HTTPMessage):
+        raise TypeError('expected httplib.HTTPMessage, got {0}.'.format(
+ type(headers)))
+
+ defects = getattr(headers, 'defects', None)
+ get_payload = getattr(headers, 'get_payload', None)
+
+ unparsed_data = None
+ if get_payload: # Platform-specific: Python 3.
+ unparsed_data = get_payload()
+
+ if defects or unparsed_data:
+ raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+ """
+ Checks whether the request of a response has been a HEAD-request.
+ Handles the quirks of AppEngine.
+
+    :param response:
+    :type response: :class:`httplib.HTTPResponse`
+ """
+ # FIXME: Can we do this somehow without accessing private httplib _method?
+ method = response._method
+ if isinstance(method, int): # Platform-specific: Appengine
+ return method == 3
+ return method.upper() == 'HEAD'
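+
+# Editor's note: illustrative sketch, not part of upstream urllib3, showing how
+# is_fp_closed() resolves the different "closed" conventions of file-like
+# objects.
+#
+#     import io
+#
+#     buf = io.BytesIO(b'data')
+#     is_fp_closed(buf)    # -> False (uses the standard ``closed`` attribute)
+#     buf.close()
+#     is_fp_closed(buf)    # -> True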
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
new file mode 100644
index 00000000..61e63afe
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
@@ -0,0 +1,402 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import time
+import logging
+from collections import namedtuple
+from itertools import takewhile
+import email
+import re
+
+from ..exceptions import (
+ ConnectTimeoutError,
+ MaxRetryError,
+ ProtocolError,
+ ReadTimeoutError,
+ ResponseError,
+ InvalidHeader,
+)
+from ..packages import six
+
+
+log = logging.getLogger(__name__)
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
+ "status", "redirect_location"])
+
+
+class Retry(object):
+ """ Retry configuration.
+
+ Each retry attempt will create a new Retry object with updated values, so
+ they can be safely reused.
+
+ Retries can be defined as a default for a pool::
+
+ retries = Retry(connect=5, read=2, redirect=5)
+ http = PoolManager(retries=retries)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+ Retries can be disabled by passing ``False``::
+
+ response = http.request('GET', 'http://example.com/', retries=False)
+
+ Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+ retries are disabled, in which case the causing exception will be raised.
+
+ :param int total:
+ Total number of retries to allow. Takes precedence over other counts.
+
+ Set to ``None`` to remove this constraint and fall back on other
+ counts. It's a good idea to set this to some sensibly-high value to
+ account for unexpected edge cases and avoid infinite retry loops.
+
+ Set to ``0`` to fail on the first retry.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int connect:
+ How many connection-related errors to retry on.
+
+ These are errors raised before the request is sent to the remote server,
+ which we assume has not triggered the server to process the request.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int read:
+ How many times to retry on read errors.
+
+ These errors are raised after the request was sent to the server, so the
+ request may have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int redirect:
+ How many redirects to perform. Limit this to avoid infinite redirect
+ loops.
+
+        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
+ 308.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int status:
+ How many times to retry on bad status codes.
+
+ These are retries made on responses, where status code matches
+ ``status_forcelist``.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param iterable method_whitelist:
+ Set of uppercased HTTP method verbs that we should retry on.
+
+ By default, we only retry on methods which are considered to be
+ idempotent (multiple requests with the same parameters end with the
+ same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
+
+ Set to a ``False`` value to retry on any verb.
+
+ :param iterable status_forcelist:
+ A set of integer HTTP status codes that we should force a retry on.
+ A retry is initiated if the request method is in ``method_whitelist``
+ and the response status code is in ``status_forcelist``.
+
+ By default, this is disabled with ``None``.
+
+ :param float backoff_factor:
+ A backoff factor to apply between attempts after the second try
+ (most errors are resolved immediately by a second try without a
+ delay). urllib3 will sleep for::
+
+ {backoff factor} * (2 ^ ({number of total retries} - 1))
+
+ seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+ for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+ than :attr:`Retry.BACKOFF_MAX`.
+
+ By default, backoff is disabled (set to 0).
+
+ :param bool raise_on_redirect: Whether, if the number of redirects is
+ exhausted, to raise a MaxRetryError, or to return a response with a
+ response code in the 3xx range.
+
+ :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+ whether we should raise an exception, or return a response,
+ if status falls in ``status_forcelist`` range and retries have
+ been exhausted.
+
+ :param tuple history: The history of the request encountered during
+ each call to :meth:`~Retry.increment`. The list is in the order
+ the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+ :param bool respect_retry_after_header:
+ Whether to respect Retry-After header on status codes defined as
+ :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
+
+ """
+
+ DEFAULT_METHOD_WHITELIST = frozenset([
+ 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
+
+ RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+ #: Maximum backoff time.
+ BACKOFF_MAX = 120
+
+ def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
+ method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
+ backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
+ history=None, respect_retry_after_header=True):
+
+ self.total = total
+ self.connect = connect
+ self.read = read
+ self.status = status
+
+ if redirect is False or total is False:
+ redirect = 0
+ raise_on_redirect = False
+
+ self.redirect = redirect
+ self.status_forcelist = status_forcelist or set()
+ self.method_whitelist = method_whitelist
+ self.backoff_factor = backoff_factor
+ self.raise_on_redirect = raise_on_redirect
+ self.raise_on_status = raise_on_status
+ self.history = history or tuple()
+ self.respect_retry_after_header = respect_retry_after_header
+
+ def new(self, **kw):
+ params = dict(
+ total=self.total,
+ connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
+ method_whitelist=self.method_whitelist,
+ status_forcelist=self.status_forcelist,
+ backoff_factor=self.backoff_factor,
+ raise_on_redirect=self.raise_on_redirect,
+ raise_on_status=self.raise_on_status,
+ history=self.history,
+ )
+ params.update(kw)
+ return type(self)(**params)
+
+ @classmethod
+ def from_int(cls, retries, redirect=True, default=None):
+ """ Backwards-compatibility for the old retries format."""
+ if retries is None:
+ retries = default if default is not None else cls.DEFAULT
+
+ if isinstance(retries, Retry):
+ return retries
+
+ redirect = bool(redirect) and None
+ new_retries = cls(retries, redirect=redirect)
+ log.debug("Converted retries value: %r -> %r", retries, new_retries)
+ return new_retries
+
+ def get_backoff_time(self):
+ """ Formula for computing the current backoff
+
+ :rtype: float
+ """
+ # We want to consider only the last consecutive errors sequence (Ignore redirects).
+ consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
+ reversed(self.history))))
+ if consecutive_errors_len <= 1:
+ return 0
+
+ backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+ return min(self.BACKOFF_MAX, backoff_value)
+
+ def parse_retry_after(self, retry_after):
+ # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ if re.match(r"^\s*[0-9]+\s*$", retry_after):
+ seconds = int(retry_after)
+ else:
+ retry_date_tuple = email.utils.parsedate(retry_after)
+ if retry_date_tuple is None:
+ raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+ retry_date = time.mktime(retry_date_tuple)
+ seconds = retry_date - time.time()
+
+ if seconds < 0:
+ seconds = 0
+
+ return seconds
+
+ def get_retry_after(self, response):
+ """ Get the value of Retry-After in seconds. """
+
+ retry_after = response.getheader("Retry-After")
+
+ if retry_after is None:
+ return None
+
+ return self.parse_retry_after(retry_after)
+
+ def sleep_for_retry(self, response=None):
+ retry_after = self.get_retry_after(response)
+ if retry_after:
+ time.sleep(retry_after)
+ return True
+
+ return False
+
+ def _sleep_backoff(self):
+ backoff = self.get_backoff_time()
+ if backoff <= 0:
+ return
+ time.sleep(backoff)
+
+ def sleep(self, response=None):
+ """ Sleep between retry attempts.
+
+ This method will respect a server's ``Retry-After`` response header
+ and sleep the duration of the time requested. If that is not present, it
+ will use an exponential backoff. By default, the backoff factor is 0 and
+ this method will return immediately.
+ """
+
+ if response:
+ slept = self.sleep_for_retry(response)
+ if slept:
+ return
+
+ self._sleep_backoff()
+
+ def _is_connection_error(self, err):
+ """ Errors when we're fairly sure that the server did not receive the
+ request, so it should be safe to retry.
+ """
+ return isinstance(err, ConnectTimeoutError)
+
+ def _is_read_error(self, err):
+ """ Errors that occur after the request has been started, so we should
+ assume that the server began processing it.
+ """
+ return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+ def _is_method_retryable(self, method):
+ """ Checks if a given HTTP method should be retried upon, depending if
+ it is included on the method whitelist.
+ """
+ if self.method_whitelist and method.upper() not in self.method_whitelist:
+ return False
+
+ return True
+
+ def is_retry(self, method, status_code, has_retry_after=False):
+ """ Is this method/status code retryable? (Based on whitelists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+        be retried upon in the presence of the aforementioned header)
+ """
+ if not self._is_method_retryable(method):
+ return False
+
+ if self.status_forcelist and status_code in self.status_forcelist:
+ return True
+
+ return (self.total and self.respect_retry_after_header and
+ has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
+
+ def is_exhausted(self):
+ """ Are we out of retries? """
+ retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+
+ return min(retry_counts) < 0
+
+ def increment(self, method=None, url=None, response=None, error=None,
+ _pool=None, _stacktrace=None):
+ """ Return a new Retry object with incremented retry counters.
+
+ :param response: A response object, or None, if the server did not
+ return a response.
+ :type response: :class:`~urllib3.response.HTTPResponse`
+ :param Exception error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: A new ``Retry`` object.
+ """
+ if self.total is False and error:
+ # Disabled, indicate to re-raise the error.
+ raise six.reraise(type(error), error, _stacktrace)
+
+ total = self.total
+ if total is not None:
+ total -= 1
+
+ connect = self.connect
+ read = self.read
+ redirect = self.redirect
+ status_count = self.status
+ cause = 'unknown'
+ status = None
+ redirect_location = None
+
+ if error and self._is_connection_error(error):
+ # Connect retry?
+ if connect is False:
+ raise six.reraise(type(error), error, _stacktrace)
+ elif connect is not None:
+ connect -= 1
+
+ elif error and self._is_read_error(error):
+ # Read retry?
+ if read is False or not self._is_method_retryable(method):
+ raise six.reraise(type(error), error, _stacktrace)
+ elif read is not None:
+ read -= 1
+
+ elif response and response.get_redirect_location():
+ # Redirect retry?
+ if redirect is not None:
+ redirect -= 1
+ cause = 'too many redirects'
+ redirect_location = response.get_redirect_location()
+ status = response.status
+
+ else:
+ # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+ cause = ResponseError.GENERIC_ERROR
+ if response and response.status:
+ if status_count is not None:
+ status_count -= 1
+ cause = ResponseError.SPECIFIC_ERROR.format(
+ status_code=response.status)
+ status = response.status
+
+ history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
+
+ new_retry = self.new(
+ total=total,
+ connect=connect, read=read, redirect=redirect, status=status_count,
+ history=history)
+
+ if new_retry.is_exhausted():
+ raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+ log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+ return new_retry
+
+ def __repr__(self):
+ return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
+ 'read={self.read}, redirect={self.redirect}, status={self.status})').format(
+ cls=type(self), self=self)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
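+
+# Editor's note: illustrative sketch, not part of upstream urllib3. It shows a
+# typical Retry configuration and the exponential backoff it produces (assuming
+# the vendored package imports as ``urllib3``; the URL is a placeholder).
+#
+#     from urllib3 import PoolManager
+#     from urllib3.util.retry import Retry
+#
+#     retries = Retry(total=5, connect=3, read=2, redirect=5,
+#                     backoff_factor=0.5, status_forcelist=(502, 503))
+#     http = PoolManager(retries=retries)
+#     http.request('GET', 'http://example.com/')
+#
+#     # With backoff_factor=0.5 the sleeps between consecutive failed attempts
+#     # are 0s, 1s, 2s, 4s, ... capped at Retry.BACKOFF_MAX (120s), because
+#     # get_backoff_time() returns backoff_factor * 2 ** (errors - 1) once more
+#     # than one consecutive error has been recorded.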
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
new file mode 100644
index 00000000..de5e4983
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
@@ -0,0 +1,588 @@
+# SPDX-License-Identifier: MIT
+# Backport of selectors.py from Python 3.5+ to support Python < 3.4
+# Also has the behavior specified in PEP 475 which is to retry syscalls
+# in the case of an EINTR error. This module is required because selectors34
+# does not follow this behavior and instead returns that no file descriptor
+# events have occurred rather than retry the syscall. The decision to drop
+# support for select.devpoll is made to maintain 100% test coverage.
+
+import errno
+import math
+import select
+import socket
+import sys
+import time
+
+from collections import namedtuple
+
+try:
+ from collections import Mapping
+except ImportError:
+ from collections.abc import Mapping
+
+try:
+ monotonic = time.monotonic
+except (AttributeError, ImportError):  # Python < 3.3
+ monotonic = time.time
+
+EVENT_READ = (1 << 0)
+EVENT_WRITE = (1 << 1)
+
+HAS_SELECT = True # Variable that shows whether the platform has a selector.
+_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
+_DEFAULT_SELECTOR = None
+
+
+class SelectorError(Exception):
+ def __init__(self, errcode):
+ super(SelectorError, self).__init__()
+ self.errno = errcode
+
+ def __repr__(self):
+ return "<SelectorError errno={0}>".format(self.errno)
+
+ def __str__(self):
+ return self.__repr__()
+
+
+def _fileobj_to_fd(fileobj):
+ """ Return a file descriptor from a file object. If
+ given an integer will simply return that integer back. """
+ if isinstance(fileobj, int):
+ fd = fileobj
+ else:
+ try:
+ fd = int(fileobj.fileno())
+ except (AttributeError, TypeError, ValueError):
+ raise ValueError("Invalid file object: {0!r}".format(fileobj))
+ if fd < 0:
+ raise ValueError("Invalid file descriptor: {0}".format(fd))
+ return fd
+
+
+# Determine which function to use to wrap system calls because Python 3.5+
+# already handles the case when system calls are interrupted.
+if sys.version_info >= (3, 5):
+ def _syscall_wrapper(func, _, *args, **kwargs):
+ """ This is the short-circuit version of the below logic
+ because in Python 3.5+ all system calls automatically restart
+ and recalculate their timeouts. """
+ try:
+ return func(*args, **kwargs)
+ except (OSError, IOError, select.error) as e:
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ raise SelectorError(errcode)
+else:
+ def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
+ """ Wrapper function for syscalls that could fail due to EINTR.
+ All functions should be retried if there is time left in the timeout
+ in accordance with PEP 475. """
+ timeout = kwargs.get("timeout", None)
+ if timeout is None:
+ expires = None
+ recalc_timeout = False
+ else:
+ timeout = float(timeout)
+ if timeout < 0.0: # Timeout less than 0 treated as no timeout.
+ expires = None
+ else:
+ expires = monotonic() + timeout
+
+ args = list(args)
+ if recalc_timeout and "timeout" not in kwargs:
+ raise ValueError(
+ "Timeout must be in args or kwargs to be recalculated")
+
+ result = _SYSCALL_SENTINEL
+ while result is _SYSCALL_SENTINEL:
+ try:
+ result = func(*args, **kwargs)
+ # OSError is thrown by select.select
+ # IOError is thrown by select.epoll.poll
+ # select.error is thrown by select.poll.poll
+ # Aren't we thankful for Python 3.x rework for exceptions?
+ except (OSError, IOError, select.error) as e:
+ # select.error wasn't a subclass of OSError in the past.
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ elif hasattr(e, "args"):
+ errcode = e.args[0]
+
+ # Also test for the Windows equivalent of EINTR.
+ is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
+ errcode == errno.WSAEINTR))
+
+ if is_interrupt:
+ if expires is not None:
+ current_time = monotonic()
+ if current_time > expires:
+ raise OSError(errno.ETIMEDOUT, "Connection timed out")
+ if recalc_timeout:
+ if "timeout" in kwargs:
+ kwargs["timeout"] = expires - current_time
+ continue
+ if errcode:
+ raise SelectorError(errcode)
+ else:
+ raise
+ return result
+
+
+SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
+
+
+class _SelectorMapping(Mapping):
+ """ Mapping of file objects to selector keys """
+
+ def __init__(self, selector):
+ self._selector = selector
+
+ def __len__(self):
+ return len(self._selector._fd_to_key)
+
+ def __getitem__(self, fileobj):
+ try:
+ fd = self._selector._fileobj_lookup(fileobj)
+ return self._selector._fd_to_key[fd]
+ except KeyError:
+ raise KeyError("{0!r} is not registered.".format(fileobj))
+
+ def __iter__(self):
+ return iter(self._selector._fd_to_key)
+
+
+class BaseSelector(object):
+ """ Abstract Selector class
+
+ A selector supports registering file objects to be monitored
+ for specific I/O events.
+
+ A file object is a file descriptor or any object with a
+ `fileno()` method. An arbitrary object can be attached to the
+ file object which can be used for example to store context info,
+ a callback, etc.
+
+ A selector can use various implementations (select(), poll(), epoll(),
+ and kqueue()) depending on the platform. The 'DefaultSelector' class uses
+ the most efficient implementation for the current platform.
+ """
+ def __init__(self):
+ # Maps file descriptors to keys.
+ self._fd_to_key = {}
+
+ # Read-only mapping returned by get_map()
+ self._map = _SelectorMapping(self)
+
+ def _fileobj_lookup(self, fileobj):
+ """ Return a file descriptor from a file object.
+ This wraps _fileobj_to_fd() to do an exhaustive
+ search in case the object is invalid but we still
+ have it in our map. Used by unregister() so we can
+ unregister an object that was previously registered
+ even if it is closed. It is also used by _SelectorMapping
+ """
+ try:
+ return _fileobj_to_fd(fileobj)
+ except ValueError:
+
+ # Search through all our mapped keys.
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ return key.fd
+
+ # Raise ValueError after all.
+ raise
+
+ def register(self, fileobj, events, data=None):
+ """ Register a file object for a set of events to monitor. """
+ if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
+ raise ValueError("Invalid events: {0!r}".format(events))
+
+ key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
+
+ if key.fd in self._fd_to_key:
+ raise KeyError("{0!r} (FD {1}) is already registered"
+ .format(fileobj, key.fd))
+
+ self._fd_to_key[key.fd] = key
+ return key
+
+ def unregister(self, fileobj):
+ """ Unregister a file object from being monitored. """
+ try:
+ key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ # Getting the fileno of a closed socket on Windows errors with EBADF.
+ except socket.error as e: # Platform-specific: Windows.
+ if e.errno != errno.EBADF:
+ raise
+ else:
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ self._fd_to_key.pop(key.fd)
+ break
+ else:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+ return key
+
+ def modify(self, fileobj, events, data=None):
+ """ Change a registered file object monitored events and data. """
+ # NOTE: Some subclasses optimize this operation even further.
+ try:
+ key = self._fd_to_key[self._fileobj_lookup(fileobj)]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ if events != key.events:
+ self.unregister(fileobj)
+ key = self.register(fileobj, events, data)
+
+ elif data != key.data:
+ # Use a shortcut to update the data.
+ key = key._replace(data=data)
+ self._fd_to_key[key.fd] = key
+
+ return key
+
+ def select(self, timeout=None):
+ """ Perform the actual selection until some monitored file objects
+ are ready or the timeout expires. """
+ raise NotImplementedError()
+
+ def close(self):
+ """ Close the selector. This must be called to ensure that all
+ underlying resources are freed. """
+ self._fd_to_key.clear()
+ self._map = None
+
+ def get_key(self, fileobj):
+ """ Return the key associated with a registered file object. """
+ mapping = self.get_map()
+ if mapping is None:
+ raise RuntimeError("Selector is closed")
+ try:
+ return mapping[fileobj]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ def get_map(self):
+ """ Return a mapping of file objects to selector keys """
+ return self._map
+
+ def _key_from_fd(self, fd):
+ """ Return the key associated to a given file descriptor
+ Return None if it is not found. """
+ try:
+ return self._fd_to_key[fd]
+ except KeyError:
+ return None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
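+# Illustrative usage sketch (not from upstream urllib3): a concrete selector,
+# such as the one returned by DefaultSelector() below, is typically driven
+# like this. 'sock' and the 'handle_readable' callback are hypothetical
+# placeholders.
+#
+# selector = DefaultSelector()
+# selector.register(sock, EVENT_READ, data=handle_readable)
+# for key, events in selector.select(timeout=1.0):
+#     if events & EVENT_READ:
+#         key.data(key.fileobj)  # invoke the attached callback
+# selector.close()
+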
+# Almost all platforms have select.select()
+if hasattr(select, "select"):
+ class SelectSelector(BaseSelector):
+ """ Select-based selector. """
+ def __init__(self):
+ super(SelectSelector, self).__init__()
+ self._readers = set()
+ self._writers = set()
+
+ def register(self, fileobj, events, data=None):
+ key = super(SelectSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ self._readers.add(key.fd)
+ if events & EVENT_WRITE:
+ self._writers.add(key.fd)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(SelectSelector, self).unregister(fileobj)
+ self._readers.discard(key.fd)
+ self._writers.discard(key.fd)
+ return key
+
+ def _select(self, r, w, timeout=None):
+ """ Wrapper for select.select because timeout is a positional arg """
+ return select.select(r, w, [], timeout)
+
+ def select(self, timeout=None):
+ # Selecting on empty lists on Windows errors out.
+ if not len(self._readers) and not len(self._writers):
+ return []
+
+ timeout = None if timeout is None else max(timeout, 0.0)
+ ready = []
+ r, w, _ = _syscall_wrapper(self._select, True, self._readers,
+ self._writers, timeout)
+ r = set(r)
+ w = set(w)
+ for fd in r | w:
+ events = 0
+ if fd in r:
+ events |= EVENT_READ
+ if fd in w:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+
+if hasattr(select, "poll"):
+ class PollSelector(BaseSelector):
+ """ Poll-based selector """
+ def __init__(self):
+ super(PollSelector, self).__init__()
+ self._poll = select.poll()
+
+ def register(self, fileobj, events, data=None):
+ key = super(PollSelector, self).register(fileobj, events, data)
+ event_mask = 0
+ if events & EVENT_READ:
+ event_mask |= select.POLLIN
+ if events & EVENT_WRITE:
+ event_mask |= select.POLLOUT
+ self._poll.register(key.fd, event_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(PollSelector, self).unregister(fileobj)
+ self._poll.unregister(key.fd)
+ return key
+
+ def _wrap_poll(self, timeout=None):
+ """ Wrapper function for select.poll.poll() so that
+ _syscall_wrapper can work with only seconds. """
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0
+ else:
+ # select.poll.poll() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ timeout = math.ceil(timeout * 1e3)
+
+ result = self._poll.poll(timeout)
+ return result
+
+ def select(self, timeout=None):
+ ready = []
+ fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.POLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.POLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+
+ return ready
+
+
+if hasattr(select, "epoll"):
+ class EpollSelector(BaseSelector):
+ """ Epoll-based selector """
+ def __init__(self):
+ super(EpollSelector, self).__init__()
+ self._epoll = select.epoll()
+
+ def fileno(self):
+ return self._epoll.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(EpollSelector, self).register(fileobj, events, data)
+ events_mask = 0
+ if events & EVENT_READ:
+ events_mask |= select.EPOLLIN
+ if events & EVENT_WRITE:
+ events_mask |= select.EPOLLOUT
+ _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(EpollSelector, self).unregister(fileobj)
+ try:
+ _syscall_wrapper(self._epoll.unregister, False, key.fd)
+ except SelectorError:
+ # This can occur when the fd was closed after registration.
+ pass
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0.0
+ else:
+ # select.epoll.poll() has a resolution of 1 millisecond
+ # but conveniently takes seconds, so we don't need a wrapper
+ # like PollSelector; we only round up so we wait at least the timeout.
+ timeout = math.ceil(timeout * 1e3) * 1e-3
+ timeout = float(timeout)
+ else:
+ timeout = -1.0 # epoll.poll() must have a float.
+
+ # We always want at least 1 to ensure that select can be called
+ # with no file descriptors registered; otherwise it will fail.
+ max_events = max(len(self._fd_to_key), 1)
+
+ ready = []
+ fd_events = _syscall_wrapper(self._epoll.poll, True,
+ timeout=timeout,
+ maxevents=max_events)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.EPOLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.EPOLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+ def close(self):
+ self._epoll.close()
+ super(EpollSelector, self).close()
+
+
+if hasattr(select, "kqueue"):
+ class KqueueSelector(BaseSelector):
+ """ Kqueue / Kevent-based selector """
+ def __init__(self):
+ super(KqueueSelector, self).__init__()
+ self._kqueue = select.kqueue()
+
+ def fileno(self):
+ return self._kqueue.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(KqueueSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ if events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+
+ return key
+
+ def unregister(self, fileobj):
+ key = super(KqueueSelector, self).unregister(fileobj)
+ if key.events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+ if key.events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ timeout = max(timeout, 0)
+
+ max_events = len(self._fd_to_key) * 2
+ ready_fds = {}
+
+ kevent_list = _syscall_wrapper(self._kqueue.control, True,
+ None, max_events, timeout)
+
+ for kevent in kevent_list:
+ fd = kevent.ident
+ event_mask = kevent.filter
+ events = 0
+ if event_mask == select.KQ_FILTER_READ:
+ events |= EVENT_READ
+ if event_mask == select.KQ_FILTER_WRITE:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ if key.fd not in ready_fds:
+ ready_fds[key.fd] = (key, events & key.events)
+ else:
+ old_events = ready_fds[key.fd][1]
+ ready_fds[key.fd] = (key, (events | old_events) & key.events)
+
+ return list(ready_fds.values())
+
+ def close(self):
+ self._kqueue.close()
+ super(KqueueSelector, self).close()
+
+
+if not hasattr(select, 'select'): # Platform-specific: AppEngine
+ HAS_SELECT = False
+
+
+def _can_allocate(struct):
+ """ Checks that select structs can be allocated by the underlying
+ operating system, not just advertised by the select module. We don't
+ check select() because we'll be hopeful that most platforms that
+ don't have it available will not advertise it (e.g. GAE). """
+ try:
+ # select.poll() objects won't fail until used.
+ if struct == 'poll':
+ p = select.poll()
+ p.poll(0)
+
+ # All others will fail on allocation.
+ else:
+ getattr(select, struct)().close()
+ return True
+ except (OSError, AttributeError) as e:
+ return False
+
+
+# Choose the best implementation, roughly:
+# kqueue == epoll > poll > select. Devpoll not supported. (See above)
+# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
+def DefaultSelector():
+ """ This function serves as a first call for DefaultSelector to
+ detect if the select module is being monkey-patched incorrectly
+ by eventlet, greenlet, and preserve proper behavior. """
+ global _DEFAULT_SELECTOR
+ if _DEFAULT_SELECTOR is None:
+ if _can_allocate('kqueue'):
+ _DEFAULT_SELECTOR = KqueueSelector
+ elif _can_allocate('epoll'):
+ _DEFAULT_SELECTOR = EpollSelector
+ elif _can_allocate('poll'):
+ _DEFAULT_SELECTOR = PollSelector
+ elif hasattr(select, 'select'):
+ _DEFAULT_SELECTOR = SelectSelector
+ else: # Platform-specific: AppEngine
+ raise ValueError('Platform does not have a selector')
+ return _DEFAULT_SELECTOR()
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
new file mode 100644
index 00000000..ece3ec39
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
@@ -0,0 +1,338 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+import errno
+import warnings
+import hmac
+
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
+
+
+SSLContext = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {
+ 32: md5,
+ 40: sha1,
+ 64: sha256,
+}
+
+
+def _const_compare_digest_backport(a, b):
+ """
+ Compare two digests of equal length in constant time.
+
+ The digests must be of type str/bytes.
+ Returns True if the digests match, and False otherwise.
+ """
+ result = abs(len(a) - len(b))
+ for l, r in zip(bytearray(a), bytearray(b)):
+ result |= l ^ r
+ return result == 0
+
+
+_const_compare_digest = getattr(hmac, 'compare_digest',
+ _const_compare_digest_backport)
+
+
+try: # Test for SSL features
+ import ssl
+ from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
+ from ssl import HAS_SNI # Has SNI?
+except ImportError:
+ pass
+
+
+try:
+ from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
+except ImportError:
+ OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+ OP_NO_COMPRESSION = 0x20000
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+# security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_CIPHERS = ':'.join([
+ 'ECDH+AESGCM',
+ 'ECDH+CHACHA20',
+ 'DH+AESGCM',
+ 'DH+CHACHA20',
+ 'ECDH+AES256',
+ 'DH+AES256',
+ 'ECDH+AES128',
+ 'DH+AES',
+ 'RSA+AESGCM',
+ 'RSA+AES',
+ '!aNULL',
+ '!eNULL',
+ '!MD5',
+])
+
+try:
+ from ssl import SSLContext # Modern SSL?
+except ImportError:
+ import sys
+
+ class SSLContext(object): # Platform-specific: Python 2 & 3.1
+ supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
+ (3, 2) <= sys.version_info)
+
+ def __init__(self, protocol_version):
+ self.protocol = protocol_version
+ # Use default values from a real SSLContext
+ self.check_hostname = False
+ self.verify_mode = ssl.CERT_NONE
+ self.ca_certs = None
+ self.options = 0
+ self.certfile = None
+ self.keyfile = None
+ self.ciphers = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def load_verify_locations(self, cafile=None, capath=None):
+ self.ca_certs = cafile
+
+ if capath is not None:
+ raise SSLError("CA directories not supported in older Pythons")
+
+ def set_ciphers(self, cipher_suite):
+ if not self.supports_set_ciphers:
+ raise TypeError(
+ 'Your version of Python does not support setting '
+ 'a custom cipher suite. Please upgrade to Python '
+ '2.7, 3.2, or later if you need this functionality.'
+ )
+ self.ciphers = cipher_suite
+
+ def wrap_socket(self, socket, server_hostname=None, server_side=False):
+ warnings.warn(
+ 'A true SSLContext object is not available. This prevents '
+ 'urllib3 from configuring SSL appropriately and may cause '
+ 'certain SSL connections to fail. You can upgrade to a newer '
+ 'version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ InsecurePlatformWarning
+ )
+ kwargs = {
+ 'keyfile': self.keyfile,
+ 'certfile': self.certfile,
+ 'ca_certs': self.ca_certs,
+ 'cert_reqs': self.verify_mode,
+ 'ssl_version': self.protocol,
+ 'server_side': server_side,
+ }
+ if self.supports_set_ciphers: # Platform-specific: Python 2.7+
+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+ else: # Platform-specific: Python 2.6
+ return wrap_socket(socket, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+ """
+ Checks if given fingerprint matches the supplied certificate.
+
+ :param cert:
+ Certificate as bytes object.
+ :param fingerprint:
+ Fingerprint as string of hexdigits, can be interspersed by colons.
+ """
+
+ fingerprint = fingerprint.replace(':', '').lower()
+ digest_length = len(fingerprint)
+ hashfunc = HASHFUNC_MAP.get(digest_length)
+ if not hashfunc:
+ raise SSLError(
+ 'Fingerprint of invalid length: {0}'.format(fingerprint))
+
+ # We need encode() here for py32; works on py2 and py33.
+ fingerprint_bytes = unhexlify(fingerprint.encode())
+
+ cert_digest = hashfunc(cert).digest()
+
+ if not _const_compare_digest(cert_digest, fingerprint_bytes):
+ raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
+ .format(fingerprint, hexlify(cert_digest)))
+
+
+def resolve_cert_reqs(candidate):
+ """
+ Resolves the argument to a numeric constant, which can be passed to
+ the wrap_socket function/method from the ssl module.
+ Defaults to :data:`ssl.CERT_NONE`.
+ If given a string it is assumed to be the name of the constant in the
+ :mod:`ssl` module or its abbreviation.
+ (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+ If it's neither `None` nor a string we assume it is already the numeric
+ constant which can directly be passed to wrap_socket.
+ """
+ if candidate is None:
+ return CERT_NONE
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'CERT_' + candidate)
+ return res
+
+ return candidate
+
+
+def resolve_ssl_version(candidate):
+ """
+ Like :func:`resolve_cert_reqs`, but resolves an SSL protocol version.
+ Defaults to :data:`ssl.PROTOCOL_SSLv23`.
+ """
+ if candidate is None:
+ return PROTOCOL_SSLv23
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, 'PROTOCOL_' + candidate)
+ return res
+
+ return candidate
+
+
+def create_urllib3_context(ssl_version=None, cert_reqs=None,
+ options=None, ciphers=None):
+ """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+ By default, this function does a lot of the same work that
+ ``ssl.create_default_context`` does on Python 3.4+. It:
+
+ - Disables SSLv2, SSLv3, and compression
+ - Sets a restricted set of server ciphers
+
+ If you wish to enable SSLv3, you can do::
+
+ from urllib3.util import ssl_
+ context = ssl_.create_urllib3_context()
+ context.options &= ~ssl_.OP_NO_SSLv3
+
+ You can do the same to enable compression (substituting ``COMPRESSION``
+ for ``SSLv3`` in the last line above).
+
+ :param ssl_version:
+ The desired protocol version to use. This will default to
+ PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+ the server and your installation of OpenSSL support.
+ :param cert_reqs:
+ Whether to require certificate verification. This defaults to
+ ``ssl.CERT_REQUIRED``.
+ :param options:
+ Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+ ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
+ :param ciphers:
+ Which cipher suites to allow the server to select.
+ :returns:
+ Constructed SSLContext object with specified options
+ :rtype: SSLContext
+ """
+ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
+
+ # Setting the default here, as we may have no ssl module on import
+ cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+ if options is None:
+ options = 0
+ # SSLv2 is easily broken and is considered harmful and dangerous
+ options |= OP_NO_SSLv2
+ # SSLv3 has several problems and is now dangerous
+ options |= OP_NO_SSLv3
+ # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+ # (issue #309)
+ options |= OP_NO_COMPRESSION
+
+ context.options |= options
+
+ if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
+ context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+ context.verify_mode = cert_reqs
+ if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
+ return context
+
+
+def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
+ ca_certs=None, server_hostname=None,
+ ssl_version=None, ciphers=None, ssl_context=None,
+ ca_cert_dir=None):
+ """
+ All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+ the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+ :param server_hostname:
+ When SNI is supported, the expected hostname of the certificate
+ :param ssl_context:
+ A pre-made :class:`SSLContext` object. If none is provided, one will
+ be created using :func:`create_urllib3_context`.
+ :param ciphers:
+ A string of ciphers we wish the client to support. This is not
+ supported on Python 2.6 as the ssl module does not support it.
+ :param ca_cert_dir:
+ A directory containing CA certificates in multiple separate files, as
+ supported by OpenSSL's -CApath flag or the capath argument to
+ SSLContext.load_verify_locations().
+ """
+ context = ssl_context
+ if context is None:
+ # Note: This branch of code and all the variables in it are no longer
+ # used by urllib3 itself. We should consider deprecating and removing
+ # this code.
+ context = create_urllib3_context(ssl_version, cert_reqs,
+ ciphers=ciphers)
+
+ if ca_certs or ca_cert_dir:
+ try:
+ context.load_verify_locations(ca_certs, ca_cert_dir)
+ except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
+ raise SSLError(e)
+ # Py33 raises FileNotFoundError which subclasses OSError
+ # These are not equivalent unless we check the errno attribute
+ except OSError as e: # Platform-specific: Python 3.3 and beyond
+ if e.errno == errno.ENOENT:
+ raise SSLError(e)
+ raise
+ elif getattr(context, 'load_default_certs', None) is not None:
+ # try to load OS default certs; works well on Windows (requires Python 3.4+)
+ context.load_default_certs()
+
+ if certfile:
+ context.load_cert_chain(certfile, keyfile)
+ if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
+ return context.wrap_socket(sock, server_hostname=server_hostname)
+
+ warnings.warn(
+ 'An HTTPS request has been made, but the SNI (Subject Name '
+ 'Indication) extension to TLS is not available on this platform. '
+ 'This may cause the server to present an incorrect TLS '
+ 'certificate, which can cause validation failures. You can upgrade to '
+ 'a newer version of Python to solve this. For more information, see '
+ 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+ '#ssl-warnings',
+ SNIMissingWarning
+ )
+ return context.wrap_socket(sock)
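+
+
+# Illustrative usage sketch (not from upstream urllib3): building a verifying
+# context and wrapping an already-connected socket. The CA bundle path,
+# 'plain_sock' and the hostname are hypothetical placeholders.
+#
+# context = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
+# context.load_verify_locations(cafile='/etc/ssl/certs/ca-certificates.crt')
+# tls_sock = ssl_wrap_socket(plain_sock, server_hostname='example.com',
+#                            ssl_context=context)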
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
new file mode 100644
index 00000000..4041cf9b
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
@@ -0,0 +1,243 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+import time
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+ """ Timeout configuration.
+
+ Timeouts can be defined as a default for a pool::
+
+ timeout = Timeout(connect=2.0, read=7.0)
+ http = PoolManager(timeout=timeout)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+ Timeouts can be disabled by setting all the parameters to ``None``::
+
+ no_timeout = Timeout(connect=None, read=None)
+ response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+ :param total:
+ This combines the connect and read timeouts into one; the read timeout
+ will be set to the time leftover from the connect attempt. In the
+ event that both a connect timeout and a total are specified, or a read
+ timeout and a total are specified, the shorter timeout will be applied.
+
+ Defaults to None.
+
+ :type total: integer, float, or None
+
+ :param connect:
+ The maximum amount of time to wait for a connection attempt to a server
+ to succeed. Omitting the parameter will default the connect timeout to
+ the system default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout for connection attempts.
+
+ :type connect: integer, float, or None
+
+ :param read:
+ The maximum amount of time to wait between consecutive
+ read operations for a response from the server. Omitting
+ the parameter will default the read timeout to the system
+ default, probably `the global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout.
+
+ :type read: integer, float, or None
+
+ .. note::
+
+ Many factors can affect the total amount of time for urllib3 to return
+ an HTTP response.
+
+ For example, Python's DNS resolver does not obey the timeout specified
+ on the socket. Other factors that can affect total request time include
+ high CPU load, high swap, the program running at a low priority level,
+ or other behaviors.
+
+ In addition, the read and total timeouts only measure the time between
+ read operations on the socket connecting the client and the server,
+ not the total amount of time for the request to return a complete
+ response. For most requests, the timeout is raised because the server
+ has not sent the first byte in the specified time. This is not always
+ the case; if a server streams one byte every fifteen seconds, a timeout
+ of 20 seconds will not trigger, even though the request will take
+ several minutes to complete.
+
+ If your goal is to cut off any request after a set amount of wall clock
+ time, consider having a second "watcher" thread to cut off a slow
+ request.
+ """
+
+ #: A sentinel object representing the default timeout value
+ DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+ def __init__(self, total=None, connect=_Default, read=_Default):
+ self._connect = self._validate_timeout(connect, 'connect')
+ self._read = self._validate_timeout(read, 'read')
+ self.total = self._validate_timeout(total, 'total')
+ self._start_connect = None
+
+ def __str__(self):
+ return '%s(connect=%r, read=%r, total=%r)' % (
+ type(self).__name__, self._connect, self._read, self.total)
+
+ @classmethod
+ def _validate_timeout(cls, value, name):
+ """ Check that a timeout attribute is valid.
+
+ :param value: The timeout value to validate
+ :param name: The name of the timeout attribute to validate. This is
+ used in error messages.
+ :return: The validated and cast version of the given value.
+ :raises ValueError: If it is a numeric value less than or equal to
+ zero, or the type is not an integer, float, or None.
+ """
+ if value is _Default:
+ return cls.DEFAULT_TIMEOUT
+
+ if value is None or value is cls.DEFAULT_TIMEOUT:
+ return value
+
+ if isinstance(value, bool):
+ raise ValueError("Timeout cannot be a boolean value. It must "
+ "be an int, float or None.")
+ try:
+ float(value)
+ except (TypeError, ValueError):
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ try:
+ if value <= 0:
+ raise ValueError("Attempted to set %s timeout to %s, but the "
+ "timeout cannot be set to a value less "
+ "than or equal to 0." % (name, value))
+ except TypeError: # Python 3
+ raise ValueError("Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value))
+
+ return value
+
+ @classmethod
+ def from_float(cls, timeout):
+ """ Create a new Timeout from a legacy timeout value.
+
+ The timeout value used by httplib.py sets the same timeout on the
+ connect() and recv() socket requests. This creates a :class:`Timeout`
+ object that sets the individual timeouts to the ``timeout`` value
+ passed to this function.
+
+ :param timeout: The legacy timeout value.
+ :type timeout: integer, float, sentinel default object, or None
+ :return: Timeout object
+ :rtype: :class:`Timeout`
+ """
+ return Timeout(read=timeout, connect=timeout)
+
+ def clone(self):
+ """ Create a copy of the timeout object
+
+ Timeout properties are stored per-pool but each request needs a fresh
+ Timeout object to ensure each one has its own start/stop configured.
+
+ :return: a copy of the timeout object
+ :rtype: :class:`Timeout`
+ """
+ # We can't use copy.deepcopy because that will also create a new object
+ # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+ # detect the user default.
+ return Timeout(connect=self._connect, read=self._read,
+ total=self.total)
+
+ def start_connect(self):
+ """ Start the timeout clock, used during a connect() attempt
+
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to start a timer that has been started already.
+ """
+ if self._start_connect is not None:
+ raise TimeoutStateError("Timeout timer has already been started.")
+ self._start_connect = current_time()
+ return self._start_connect
+
+ def get_connect_duration(self):
+ """ Gets the time elapsed since the call to :meth:`start_connect`.
+
+ :return: Elapsed time.
+ :rtype: float
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to get duration for a timer that hasn't been started.
+ """
+ if self._start_connect is None:
+ raise TimeoutStateError("Can't get connect duration for timer "
+ "that has not started.")
+ return current_time() - self._start_connect
+
+ @property
+ def connect_timeout(self):
+ """ Get the value to use when setting a connection timeout.
+
+ This will be a positive float or integer, the value None
+ (never timeout), or the default system timeout.
+
+ :return: Connect timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ """
+ if self.total is None:
+ return self._connect
+
+ if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+ return self.total
+
+ return min(self._connect, self.total)
+
+ @property
+ def read_timeout(self):
+ """ Get the value for the read timeout.
+
+ This assumes some time has elapsed in the connection timeout and
+ computes the read timeout appropriately.
+
+ If self.total is set, the read timeout is dependent on the amount of
+ time taken by the connect timeout. If the connection time has not been
+ established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+ raised.
+
+ :return: Value to use for the read timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+ has not yet been called on this object.
+ """
+ if (self.total is not None and
+ self.total is not self.DEFAULT_TIMEOUT and
+ self._read is not None and
+ self._read is not self.DEFAULT_TIMEOUT):
+ # In case the connect timeout has not yet been established.
+ if self._start_connect is None:
+ return self._read
+ return max(0, min(self.total - self.get_connect_duration(),
+ self._read))
+ elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+ return max(0, self.total - self.get_connect_duration())
+ else:
+ return self._read
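+
+
+# Illustrative example (not from upstream urllib3): with
+# Timeout(total=10.0, read=7.0), after start_connect() and a connect phase
+# that took 4 seconds, the read_timeout property evaluates to
+# max(0, min(10.0 - 4.0, 7.0)) == 6.0.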
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
new file mode 100644
index 00000000..99fd6534
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
@@ -0,0 +1,231 @@
+# SPDX-License-Identifier: MIT
+from __future__ import absolute_import
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+
+
+url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ('http', 'https', None)
+
+
+class Url(namedtuple('Url', url_attrs)):
+ """
+ Datastructure for representing an HTTP URL. Used as a return value for
+ :func:`parse_url`. Both the scheme and host are normalized as they are
+ both case-insensitive according to RFC 3986.
+ """
+ __slots__ = ()
+
+ def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
+ query=None, fragment=None):
+ if path and not path.startswith('/'):
+ path = '/' + path
+ if scheme:
+ scheme = scheme.lower()
+ if host and scheme in NORMALIZABLE_SCHEMES:
+ host = host.lower()
+ return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
+ query, fragment)
+
+ @property
+ def hostname(self):
+ """For backwards-compatibility with urlparse. We're nice like that."""
+ return self.host
+
+ @property
+ def request_uri(self):
+ """Absolute path including the query string."""
+ uri = self.path or '/'
+
+ if self.query is not None:
+ uri += '?' + self.query
+
+ return uri
+
+ @property
+ def netloc(self):
+ """Network location including host and port"""
+ if self.port:
+ return '%s:%d' % (self.host, self.port)
+ return self.host
+
+ @property
+ def url(self):
+ """
+ Convert self into a url
+
+ This function should more or less round-trip with :func:`.parse_url`. The
+ returned url may not be exactly the same as the url passed to
+ :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+ with a blank port will have : removed).
+
+ Example: ::
+
+ >>> U = parse_url('http://google.com/mail/')
+ >>> U.url
+ 'http://google.com/mail/'
+ >>> Url('http', 'username:password', 'host.com', 80,
+ ... '/path', 'query', 'fragment').url
+ 'http://username:password@host.com:80/path?query#fragment'
+ """
+ scheme, auth, host, port, path, query, fragment = self
+ url = ''
+
+ # We use "is not None" we want things to happen with empty strings (or 0 port)
+ if scheme is not None:
+ url += scheme + '://'
+ if auth is not None:
+ url += auth + '@'
+ if host is not None:
+ url += host
+ if port is not None:
+ url += ':' + str(port)
+ if path is not None:
+ url += path
+ if query is not None:
+ url += '?' + query
+ if fragment is not None:
+ url += '#' + fragment
+
+ return url
+
+ def __str__(self):
+ return self.url
+
+
+def split_first(s, delims):
+ """
+ Given a string and an iterable of delimiters, split on the first found
+ delimiter. Return two split parts and the matched delimiter.
+
+ If not found, then the first part is the full input string.
+
+ Example::
+
+ >>> split_first('foo/bar?baz', '?/=')
+ ('foo', 'bar?baz', '/')
+ >>> split_first('foo/bar?baz', '123')
+ ('foo/bar?baz', '', None)
+
+ Scales linearly with the number of delims; not ideal for a large number of delims.
+ """
+ min_idx = None
+ min_delim = None
+ for d in delims:
+ idx = s.find(d)
+ if idx < 0:
+ continue
+
+ if min_idx is None or idx < min_idx:
+ min_idx = idx
+ min_delim = d
+
+ if min_idx is None or min_idx < 0:
+ return s, '', None
+
+ return s[:min_idx], s[min_idx + 1:], min_delim
+
+
+def parse_url(url):
+ """
+ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+ performed to parse incomplete urls. Fields not provided will be None.
+
+ Partly backwards-compatible with :mod:`urlparse`.
+
+ Example::
+
+ >>> parse_url('http://google.com/mail/')
+ Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+ >>> parse_url('google.com:80')
+ Url(scheme=None, host='google.com', port=80, path=None, ...)
+ >>> parse_url('/foo?bar')
+ Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+ """
+
+ # While this code has overlap with stdlib's urlparse, it is much
+ # simplified for our needs and less annoying.
+ # Additionally, this implementation does silly things to be optimal
+ # on CPython.
+
+ if not url:
+ # Empty
+ return Url()
+
+ scheme = None
+ auth = None
+ host = None
+ port = None
+ path = None
+ fragment = None
+ query = None
+
+ # Scheme
+ if '://' in url:
+ scheme, url = url.split('://', 1)
+
+ # Find the earliest Authority Terminator
+ # (http://tools.ietf.org/html/rfc3986#section-3.2)
+ url, path_, delim = split_first(url, ['/', '?', '#'])
+
+ if delim:
+ # Reassemble the path
+ path = delim + path_
+
+ # Auth
+ if '@' in url:
+ # Last '@' denotes end of auth part
+ auth, url = url.rsplit('@', 1)
+
+ # IPv6
+ if url and url[0] == '[':
+ host, url = url.split(']', 1)
+ host += ']'
+
+ # Port
+ if ':' in url:
+ _host, port = url.split(':', 1)
+
+ if not host:
+ host = _host
+
+ if port:
+ # If given, ports must be integers. No whitespace, no plus or
+ # minus prefixes, no non-integer digits such as ^2 (superscript).
+ if not port.isdigit():
+ raise LocationParseError(url)
+ try:
+ port = int(port)
+ except ValueError:
+ raise LocationParseError(url)
+ else:
+ # Blank ports are cool, too. (rfc3986#section-3.2.3)
+ port = None
+
+ elif not host and url:
+ host = url
+
+ if not path:
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+ # Fragment
+ if '#' in path:
+ path, fragment = path.split('#', 1)
+
+ # Query
+ if '?' in path:
+ path, query = path.split('?', 1)
+
+ return Url(scheme, auth, host, port, path, query, fragment)
+
+
+def get_host(url):
+ """
+ Deprecated. Use :func:`parse_url` instead.
+ """
+ p = parse_url(url)
+ return p.scheme or 'http', p.hostname, p.port
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
new file mode 100644
index 00000000..21e72979
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: MIT
+from .selectors import (
+ HAS_SELECT,
+ DefaultSelector,
+ EVENT_READ,
+ EVENT_WRITE
+)
+
+
+def _wait_for_io_events(socks, events, timeout=None):
+ """ Waits for IO events to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be interacted with immediately. """
+ if not HAS_SELECT:
+ raise ValueError('Platform does not have a selector')
+ if not isinstance(socks, list):
+ # Probably just a single socket.
+ if hasattr(socks, "fileno"):
+ socks = [socks]
+ # Otherwise it might be a non-list iterable.
+ else:
+ socks = list(socks)
+ with DefaultSelector() as selector:
+ for sock in socks:
+ selector.register(sock, events)
+ return [key[0].fileobj for key in
+ selector.select(timeout) if key[1] & events]
+
+
+def wait_for_read(socks, timeout=None):
+ """ Waits for reading to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be read from immediately. """
+ return _wait_for_io_events(socks, EVENT_READ, timeout)
+
+
+def wait_for_write(socks, timeout=None):
+ """ Waits for writing to be available from a list of sockets
+ or optionally a single socket if passed in. Returns a list of
+ sockets that can be written to immediately. """
+ return _wait_for_io_events(socks, EVENT_WRITE, timeout)
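+
+
+# Illustrative example (not from upstream urllib3): 'sock_a' and 'sock_b' are
+# hypothetical connected sockets.
+#
+# for sock in wait_for_read([sock_a, sock_b], timeout=0.5):
+#     data = sock.recv(4096)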
diff --git a/collectors/python.d.plugin/rethinkdbs/Makefile.inc b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
new file mode 100644
index 00000000..dec60446
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rethinkdbs/rethinkdbs.chart.py
+dist_pythonconfig_DATA += rethinkdbs/rethinkdbs.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rethinkdbs/README.md rethinkdbs/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
new file mode 120000
index 00000000..78ddcfa1
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -0,0 +1 @@
+integrations/rethinkdb.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md b/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md
new file mode 100644
index 00000000..ab51c051
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md
@@ -0,0 +1,190 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rethinkdbs/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rethinkdbs/metadata.yaml"
+sidebar_label: "RethinkDB"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# RethinkDB
+
+
+<img src="https://netdata.cloud/img/rethinkdb.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: rethinkdbs
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors metrics about RethinkDB clusters and database servers.
+
+It uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+When no configuration file is found, the collector tries to connect to 127.0.0.1:28015.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per RethinkDB instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| rethinkdb.cluster_connected_servers | connected, missing | servers |
+| rethinkdb.cluster_clients_active | active | clients |
+| rethinkdb.cluster_queries | queries | queries/s |
+| rethinkdb.cluster_documents | reads, writes | documents/s |
+
+### Per database server
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| rethinkdb.client_connections | connections | connections |
+| rethinkdb.clients_active | active | clients |
+| rethinkdb.queries | queries | queries/s |
+| rethinkdb.documents | reads, writes | documents/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Required python module
+
+The collector requires the `rethinkdb` python module to be installed.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/rethinkdbs.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/rethinkdbs.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| host | Hostname or IP address of the RethinkDB server. | localhost | no |
+| port | Port to connect to the RethinkDB server. | 28015 | no |
+| user | The username to use to connect to the RethinkDB server. | admin | no |
+| password | The password to use to connect to the RethinkDB server. | | no |
+| timeout | Set a connect timeout to the RethinkDB server. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Local RethinkDB server
+
+An example of a configuration for a local RethinkDB server
+
+```yaml
+localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 28015
+ user: "user"
+ password: "pass"
+
+```
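+
+##### Remote RethinkDB server
+
+A sketch of a job for a remote RethinkDB server, using only the options documented above; the host address and credentials here are placeholders.
+
+```yaml
+remote:
+ name: 'remote'
+ host: '203.0.113.10'
+ port: 28015
+ user: "admin"
+ password: "pass"
+ timeout: 2
+
+```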
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `rethinkdbs` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin rethinkdbs debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/rethinkdbs/metadata.yaml b/collectors/python.d.plugin/rethinkdbs/metadata.yaml
new file mode 100644
index 00000000..bbc50eac
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/metadata.yaml
@@ -0,0 +1,188 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: rethinkdbs
+ monitored_instance:
+ name: RethinkDB
+ link: 'https://rethinkdb.com/'
+ categories:
+ - data-collection.database-servers
+ icon_filename: 'rethinkdb.png'
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ''
+ keywords:
+ - rethinkdb
+ - database
+ - db
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: 'This collector monitors metrics about RethinkDB clusters and database servers.'
+ method_description: 'It uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.'
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ''
+ default_behavior:
+ auto_detection:
+ description: 'When no configuration file is found, the collector tries to connect to 127.0.0.1:28015.'
+ limits:
+ description: ''
+ performance_impact:
+ description: ''
+ setup:
+ prerequisites:
+ list:
+ - title: 'Required python module'
+ description: 'The collector requires the `rethinkdb` python module to be installed.'
+ configuration:
+ file:
+ name: python.d/rethinkdbs.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ''
+ required: false
+ - name: host
+ description: Hostname or IP address of the RethinkDB server.
+ default_value: 'localhost'
+ required: false
+ - name: port
+ description: Port to connect to the RethinkDB server.
+ default_value: '28015'
+ required: false
+ - name: user
+ description: The username to use to connect to the RethinkDB server.
+ default_value: 'admin'
+ required: false
+ - name: password
+ description: The password to use to connect to the RethinkDB server.
+ default_value: ''
+ required: false
+ - name: timeout
+ description: Set a connect timeout to the RethinkDB server.
+ default_value: '2'
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Local RethinkDB server
+ description: An example of a configuration for a local RethinkDB server
+ folding:
+ enabled: false
+ config: |
+ localhost:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 28015
+ user: "user"
+ password: "pass"
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: rethinkdb.cluster_connected_servers
+ description: Connected Servers
+ unit: "servers"
+ chart_type: stacked
+ dimensions:
+ - name: connected
+ - name: missing
+ - name: rethinkdb.cluster_clients_active
+ description: Active Clients
+ unit: "clients"
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: rethinkdb.cluster_queries
+ description: Queries
+ unit: "queries/s"
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: rethinkdb.cluster_documents
+ description: Documents
+ unit: "documents/s"
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: database server
+ description: ""
+ labels: []
+ metrics:
+ - name: rethinkdb.client_connections
+ description: Client Connections
+ unit: "connections"
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: rethinkdb.clients_active
+ description: Active Clients
+ unit: "clients"
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: rethinkdb.queries
+ description: Queries
+ unit: "queries/s"
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: rethinkdb.documents
+ description: Documents
+ unit: "documents/s"
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
new file mode 100644
index 00000000..e3fbc363
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Description: rethinkdb netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+try:
+ import rethinkdb as rdb
+
+ HAS_RETHINKDB = True
+except ImportError:
+ HAS_RETHINKDB = False
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+ORDER = [
+ 'cluster_connected_servers',
+ 'cluster_clients_active',
+ 'cluster_queries',
+ 'cluster_documents',
+]
+
+
+def cluster_charts():
+ return {
+ 'cluster_connected_servers': {
+ 'options': [None, 'Connected Servers', 'servers', 'cluster', 'rethinkdb.cluster_connected_servers',
+ 'stacked'],
+ 'lines': [
+ ['cluster_servers_connected', 'connected'],
+ ['cluster_servers_missing', 'missing'],
+ ]
+ },
+ 'cluster_clients_active': {
+ 'options': [None, 'Active Clients', 'clients', 'cluster', 'rethinkdb.cluster_clients_active',
+ 'line'],
+ 'lines': [
+ ['cluster_clients_active', 'active'],
+ ]
+ },
+ 'cluster_queries': {
+ 'options': [None, 'Queries', 'queries/s', 'cluster', 'rethinkdb.cluster_queries', 'line'],
+ 'lines': [
+ ['cluster_queries_per_sec', 'queries'],
+ ]
+ },
+ 'cluster_documents': {
+ 'options': [None, 'Documents', 'documents/s', 'cluster', 'rethinkdb.cluster_documents', 'line'],
+ 'lines': [
+ ['cluster_read_docs_per_sec', 'reads'],
+ ['cluster_written_docs_per_sec', 'writes'],
+ ]
+ },
+ }
+
+
+def server_charts(n):
+ o = [
+ '{0}_client_connections'.format(n),
+ '{0}_clients_active'.format(n),
+ '{0}_queries'.format(n),
+ '{0}_documents'.format(n),
+ ]
+ f = 'server {0}'.format(n)
+
+ c = {
+ o[0]: {
+ 'options': [None, 'Client Connections', 'connections', f, 'rethinkdb.client_connections', 'line'],
+ 'lines': [
+ ['{0}_client_connections'.format(n), 'connections'],
+ ]
+ },
+ o[1]: {
+ 'options': [None, 'Active Clients', 'clients', f, 'rethinkdb.clients_active', 'line'],
+ 'lines': [
+ ['{0}_clients_active'.format(n), 'active'],
+ ]
+ },
+ o[2]: {
+ 'options': [None, 'Queries', 'queries/s', f, 'rethinkdb.queries', 'line'],
+ 'lines': [
+ ['{0}_queries_total'.format(n), 'queries', 'incremental'],
+ ]
+ },
+ o[3]: {
+ 'options': [None, 'Documents', 'documents/s', f, 'rethinkdb.documents', 'line'],
+ 'lines': [
+ ['{0}_read_docs_total'.format(n), 'reads', 'incremental'],
+ ['{0}_written_docs_total'.format(n), 'writes', 'incremental'],
+ ]
+ },
+ }
+
+ return o, c
+
+
+class Cluster:
+ def __init__(self, raw):
+ self.raw = raw
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ return {
+ 'cluster_clients_active': qe['clients_active'],
+ 'cluster_queries_per_sec': qe['queries_per_sec'],
+ 'cluster_read_docs_per_sec': qe['read_docs_per_sec'],
+ 'cluster_written_docs_per_sec': qe['written_docs_per_sec'],
+ 'cluster_servers_connected': 0,
+ 'cluster_servers_missing': 0,
+ }
+
+
+class Server:
+ def __init__(self, raw):
+ self.name = raw['server']
+ self.raw = raw
+
+ def error(self):
+ return self.raw.get('error')
+
+ def data(self):
+ qe = self.raw['query_engine']
+
+ d = {
+ 'client_connections': qe['client_connections'],
+ 'clients_active': qe['clients_active'],
+ 'queries_total': qe['queries_total'],
+ 'read_docs_total': qe['read_docs_total'],
+ 'written_docs_total': qe['written_docs_total'],
+ }
+
+ return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d)
+
+
+# https://pypi.org/project/rethinkdb/2.4.0/
+# rdb.RethinkDB() can be used as a drop-in replacement for the rdb module.
+# https://github.com/rethinkdb/rethinkdb-python#quickstart
+def get_rethinkdb():
+ if hasattr(rdb, 'RethinkDB'):
+ return rdb.RethinkDB()
+ return rdb
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list(ORDER)
+ self.definitions = cluster_charts()
+ self.host = self.configuration.get('host', '127.0.0.1')
+ self.port = self.configuration.get('port', 28015)
+ self.user = self.configuration.get('user', 'admin')
+ self.password = self.configuration.get('password')
+ self.timeout = self.configuration.get('timeout', 2)
+ self.rdb = None
+ self.conn = None
+ self.alive = True
+
+ def check(self):
+ if not HAS_RETHINKDB:
+ self.error('"rethinkdb" module is needed to use rethinkdbs.py')
+ return False
+
+ self.debug("rethinkdb driver version {0}".format(rdb.__version__))
+ self.rdb = get_rethinkdb()
+
+ if not self.connect():
+ return None
+
+ stats = self.get_stats()
+
+ if not stats:
+ return None
+
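+ # stats[0] is the cluster-wide row; the remaining rows describe individual
+ # servers (and other stats objects), so per-server charts are added only for
+ # rows identified as 'server'.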
+ for v in stats[1:]:
+ if get_id(v) == 'server':
+ o, c = server_charts(v['server'])
+ self.order.extend(o)
+ self.definitions.update(c)
+
+ return True
+
+ def get_data(self):
+ if not self.is_alive():
+ return None
+
+ stats = self.get_stats()
+
+ if not stats:
+ return None
+
+ data = dict()
+
+ # cluster
+ data.update(Cluster(stats[0]).data())
+
+ # servers
+ for v in stats[1:]:
+ if get_id(v) != 'server':
+ continue
+
+ s = Server(v)
+
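+ # A row carrying an 'error' field belongs to a server the cluster cannot reach,
+ # so it is counted as missing rather than connected.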
+ if s.error():
+ data['cluster_servers_missing'] += 1
+ else:
+ data['cluster_servers_connected'] += 1
+ data.update(s.data())
+
+ return data
+
+ def get_stats(self):
+ try:
+ return list(self.rdb.db('rethinkdb').table('stats').run(self.conn).items)
+ except rdb.errors.ReqlError:
+ self.alive = False
+ return None
+
+ def connect(self):
+ try:
+ self.conn = self.rdb.connect(
+ host=self.host,
+ port=self.port,
+ user=self.user,
+ password=self.password,
+ timeout=self.timeout,
+ )
+ self.alive = True
+ return True
+ except rdb.errors.ReqlError as error:
+ self.error('Connection to {0}:{1} failed: {2}'.format(self.host, self.port, error))
+ return False
+
+ def reconnect(self):
+ # The connection is already closed after rdb.errors.ReqlError,
+ # so we do not need to call conn.close()
+ if self.connect():
+ return True
+ return False
+
+ def is_alive(self):
+ if not self.alive:
+ return self.reconnect()
+ return True
+
+
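+# Rows of the system 'stats' table have an 'id' array whose first element names the
+# row type, e.g. ['cluster'] or ['server', '<server uuid>'].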
+def get_id(v):
+ return v['id'][0]
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
new file mode 100644
index 00000000..d671acbb
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
@@ -0,0 +1,76 @@
+# netdata python.d.plugin configuration for rethinkdb
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, rethinkdb also supports the following:
+#
+# host: IP or HOSTNAME # default is '127.0.0.1'
+# port: PORT # default is 28015
+# user: USERNAME # default is 'admin'
+# password: PASSWORD # not set by default
+# timeout: TIMEOUT # default is 2
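+#
+# Example of a remote job (commented out; the host and password are placeholders):
+#
+# remote:
+#   host: '203.0.113.10'
+#   port: 28015
+#   user: 'admin'
+#   password: 'secret'
+#   timeout: 2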
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ name: 'local'
+ host: 'localhost'
diff --git a/collectors/python.d.plugin/retroshare/Makefile.inc b/collectors/python.d.plugin/retroshare/Makefile.inc
new file mode 100644
index 00000000..891193e6
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += retroshare/retroshare.chart.py
+dist_pythonconfig_DATA += retroshare/retroshare.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += retroshare/README.md retroshare/Makefile.inc
+
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
new file mode 120000
index 00000000..4e4c2cdb
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -0,0 +1 @@
+integrations/retroshare.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/retroshare/integrations/retroshare.md b/collectors/python.d.plugin/retroshare/integrations/retroshare.md
new file mode 100644
index 00000000..4fc003c6
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/integrations/retroshare.md
@@ -0,0 +1,191 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/retroshare/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/retroshare/metadata.yaml"
+sidebar_label: "RetroShare"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Media Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# RetroShare
+
+
+<img src="https://netdata.cloud/img/retroshare.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: retroshare
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics.
+
+It connects to the RetroShare web interface to gather metrics.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration.
+
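+To verify that the web interface is reachable, you can query the same endpoint the collector uses (`/api/v2/stats`). The snippet below is only a sanity-check sketch, not part of the collector, and it assumes the default `http://localhost:9090` address:
+
+```python
+import json
+from urllib.request import urlopen
+
+reply = json.load(urlopen('http://localhost:9090/api/v2/stats'))
+print(reply.get('returncode'))  # the collector expects this to be 'ok'
+```
+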
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per RetroShare instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| retroshare.bandwidth | Upload, Download | kilobits/s |
+| retroshare.peers | All friends, Connected friends | peers |
+| retroshare.dht | DHT nodes estimated, RS nodes estimated | peers |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ retroshare_dht_working ](https://github.com/netdata/netdata/blob/master/health/health.d/retroshare.conf) | retroshare.dht | number of DHT peers |
+
+
+## Setup
+
+### Prerequisites
+
+#### RetroShare web interface
+
+RetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/retroshare.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/retroshare.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no |
+
+</details>
+
+#### Examples
+
+##### Local RetroShare Web UI
+
+A basic configuration for a RetroShare server running on localhost.
+
+<details><summary>Config</summary>
+
+```yaml
+localhost:
+ name: 'local retroshare'
+ url: 'http://localhost:9090'
+
+```
+</details>
+
+##### Remote RetroShare Web UI
+
+A basic configuration for a remote RetroShare server.
+
+<details><summary>Config</summary>
+
+```yaml
+remote:
+ name: 'remote retroshare'
+ url: 'http://1.2.3.4:9090'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `retroshare` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin retroshare debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/retroshare/metadata.yaml b/collectors/python.d.plugin/retroshare/metadata.yaml
new file mode 100644
index 00000000..0a769616
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/metadata.yaml
@@ -0,0 +1,144 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: retroshare
+ monitored_instance:
+ name: RetroShare
+ link: "https://retroshare.cc/"
+ categories:
+ - data-collection.media-streaming-servers
+ icon_filename: "retroshare.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - retroshare
+ - p2p
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics."
+ method_description: "It connects to the RetroShare web interface to gather metrics."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "The collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "RetroShare web interface"
+ description: |
+ RetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host.
+ configuration:
+ file:
+ name: python.d/retroshare.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: url
+ description: The URL to the RetroShare Web UI.
+ default_value: "http://localhost:9090"
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Local RetroShare Web UI
+ description: A basic configuration for a RetroShare server running on localhost.
+ config: |
+ localhost:
+ name: 'local retroshare'
+ url: 'http://localhost:9090'
+ - name: Remote RetroShare Web UI
+ description: A basic configuration for a remote RetroShare server.
+ config: |
+ remote:
+ name: 'remote retroshare'
+ url: 'http://1.2.3.4:9090'
+
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: retroshare_dht_working
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/retroshare.conf
+ metric: retroshare.dht
+ info: number of DHT peers
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: retroshare.bandwidth
+ description: RetroShare Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: Upload
+ - name: Download
+ - name: retroshare.peers
+ description: RetroShare Peers
+ unit: "peers"
+ chart_type: line
+ dimensions:
+ - name: All friends
+ - name: Connected friends
+ - name: retroshare.dht
+ description: Retroshare DHT
+ unit: "peers"
+ chart_type: line
+ dimensions:
+ - name: DHT nodes estimated
+ - name: RS nodes estimated
diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
new file mode 100644
index 00000000..3f9593e9
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Description: RetroShare netdata python.d module
+# Authors: sehraf
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'bandwidth',
+ 'peers',
+ 'dht',
+]
+
+CHARTS = {
+ 'bandwidth': {
+ 'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
+ 'lines': [
+ ['bandwidth_up_kb', 'Upload'],
+ ['bandwidth_down_kb', 'Download']
+ ]
+ },
+ 'peers': {
+ 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
+ 'lines': [
+ ['peers_all', 'All friends'],
+ ['peers_connected', 'Connected friends']
+ ]
+ },
+ 'dht': {
+ 'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
+ 'lines': [
+ ['dht_size_all', 'DHT nodes estimated'],
+ ['dht_size_rs', 'RS nodes estimated']
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.baseurl = self.configuration.get('url', 'http://localhost:9090')
+
+ def _get_stats(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ try:
+ raw = self._get_raw_data()
+ parsed = json.loads(raw)
+ if str(parsed['returncode']) != 'ok':
+ return None
+ except (TypeError, ValueError):
+ return None
+
+ return parsed['data'][0]
+
+ def _get_data(self):
+ """
+ Get data from API
+ :return: dict
+ """
+ self.url = self.baseurl + '/api/v2/stats'
+ data = self._get_stats()
+ if data is None:
+ return None
+
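+ # Negate upload so it is drawn below the x-axis on the bandwidth area chart.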
+ data['bandwidth_up_kb'] = data['bandwidth_up_kb'] * -1
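+ # When the DHT is disabled, report no value for the DHT dimensions instead of zeros.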
+ if data['dht_active'] is False:
+ data['dht_size_all'] = None
+ data['dht_size_rs'] = None
+
+ return data
diff --git a/collectors/python.d.plugin/retroshare/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf
new file mode 100644
index 00000000..3d0af538
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/retroshare.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for RetroShare
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, RetroShare also supports the following:
+#
+# - url: 'url' # the URL to the WebUI
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ url: 'http://localhost:9090'
diff --git a/collectors/python.d.plugin/riakkv/Makefile.inc b/collectors/python.d.plugin/riakkv/Makefile.inc
new file mode 100644
index 00000000..87d29f82
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += riakkv/riakkv.chart.py
+dist_pythonconfig_DATA += riakkv/riakkv.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += riakkv/README.md riakkv/Makefile.inc
+
diff --git a/collectors/python.d.plugin/riakkv/README.md b/collectors/python.d.plugin/riakkv/README.md
new file mode 120000
index 00000000..f43ece09
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/README.md
@@ -0,0 +1 @@
+integrations/riakkv.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/riakkv/integrations/riakkv.md b/collectors/python.d.plugin/riakkv/integrations/riakkv.md
new file mode 100644
index 00000000..2e8279bc
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/integrations/riakkv.md
@@ -0,0 +1,220 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/riakkv/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/riakkv/metadata.yaml"
+sidebar_label: "RiakKV"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# RiakKV
+
+
+<img src="https://netdata.cloud/img/riak.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: riakkv
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors RiakKV metrics about throughput, latency, resources and more.
+
+
+This collector reads the database stats from the `/stats` endpoint.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected.
+
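+The collector simply parses the JSON document returned by that endpoint. The snippet below is a minimal sketch, not part of the collector, showing the kind of data it consumes; it assumes Riak listens on `localhost:8098`:
+
+```python
+import json
+from urllib.request import urlopen
+
+stats = json.load(urlopen('http://localhost:8098/stats'))
+# Two of the counters charted as riak.kv.throughput:
+print(stats.get('node_gets_total'), stats.get('node_puts_total'))
+```
+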
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per RiakKV instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| riak.kv.throughput | gets, puts | operations/s |
+| riak.dt.vnode_updates | counters, sets, maps | operations/s |
+| riak.search | queries | queries/s |
+| riak.search.documents | indexed | documents/s |
+| riak.consistent.operations | gets, puts | operations/s |
+| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |
+| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |
+| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |
+| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |
+| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |
+| riak.search.latency.query | median, min, 95, 99, 999, max | ms |
+| riak.search.latency.index | median, min, 95, 99, 999, max | ms |
+| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |
+| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |
+| riak.vm | processes | total |
+| riak.vm.memory.processes | allocated, used | MB |
+| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |
+| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |
+| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |
+| riak.search.index | errors | errors |
+| riak.core.protobuf_connections | active | connections |
+| riak.core.repairs | read | repairs |
+| riak.core.fsm_active | get, put, secondary index, list keys | fsms |
+| riak.core.fsm_rejected | get, put | fsms |
+| riak.search.index | bad_entry, extract_fail | writes |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ riakkv_1h_kv_get_mean_latency ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to client over the last hour |
+| [ riakkv_kv_get_slow ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |
+| [ riakkv_1h_kv_put_mean_latency ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last hour |
+| [ riakkv_kv_put_slow ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |
+| [ riakkv_vm_high_process_count ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.vm | number of processes running in the Erlang VM |
+| [ riakkv_list_keys_active ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.core.fsm_active | number of currently running list keys finite state machines |
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure RiakKV to enable /stats endpoint
+
+You can follow the RiakKV configuration reference documentation for how to enable this.
+
+Source: https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/riakkv.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/riakkv.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| url | The url of the server | no | yes |
+
+</details>
+
+#### Examples
+
+##### Basic (default)
+
+A basic example configuration per job
+
+```yaml
+local:
+  url: 'http://localhost:8098/stats'
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+local:
+ url: 'http://localhost:8098/stats'
+
+remote:
+ url: 'http://192.0.2.1:8098/stats'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `riakkv` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin riakkv debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/riakkv/metadata.yaml b/collectors/python.d.plugin/riakkv/metadata.yaml
new file mode 100644
index 00000000..441937f8
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/metadata.yaml
@@ -0,0 +1,358 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: riakkv
+ monitored_instance:
+ name: RiakKV
+ link: "https://riak.com/products/riak-kv/index.html"
+ categories:
+ - data-collection.database-servers
+ icon_filename: "riak.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - database
+ - nosql
+ - big data
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors RiakKV metrics about throughput, latency, resources and more.
+ method_description: "This collector reads the database stats from the `/stats` endpoint."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "If the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Configure RiakKV to enable /stats endpoint
+ description: |
+ You can follow the RiakKV configuration reference documentation for how to enable this.
+
+ Source: https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces
+ configuration:
+ file:
+ name: "python.d/riakkv.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: url
+ description: The url of the server
+ default_value: no
+ required: true
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic (default)
+ folding:
+ enabled: false
+ description: A basic example configuration per job
+ config: |
+ local:
+   url: 'http://localhost:8098/stats'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ local:
+ url: 'http://localhost:8098/stats'
+
+ remote:
+ url: 'http://192.0.2.1:8098/stats'
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: riakkv_1h_kv_get_mean_latency
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
+ metric: riak.kv.latency.get
+ info: average time between reception of client GET request and subsequent response to client over the last hour
+ - name: riakkv_kv_get_slow
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
+ metric: riak.kv.latency.get
+ info: average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour
+ - name: riakkv_1h_kv_put_mean_latency
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
+ metric: riak.kv.latency.put
+ info: average time between reception of client PUT request and subsequent response to the client over the last hour
+ - name: riakkv_kv_put_slow
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
+ metric: riak.kv.latency.put
+ info: average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour
+ - name: riakkv_vm_high_process_count
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
+ metric: riak.vm
+ info: number of processes running in the Erlang VM
+ - name: riakkv_list_keys_active
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
+ metric: riak.core.fsm_active
+ info: number of currently running list keys finite state machines
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: riak.kv.throughput
+ description: Reads & writes coordinated by this node
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: gets
+ - name: puts
+ - name: riak.dt.vnode_updates
+ description: Update operations coordinated by local vnodes by data type
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: counters
+ - name: sets
+ - name: maps
+ - name: riak.search
+ description: Search queries on the node
+ unit: "queries/s"
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: riak.search.documents
+ description: Documents indexed by search
+ unit: "documents/s"
+ chart_type: line
+ dimensions:
+ - name: indexed
+ - name: riak.consistent.operations
+ description: Consistent node operations
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: gets
+ - name: puts
+ - name: riak.kv.latency.get
+ description: Time between reception of a client GET request and subsequent response to client
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.kv.latency.put
+ description: Time between reception of a client PUT request and subsequent response to client
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.dt.latency.counter_merge
+ description: Time it takes to perform an Update Counter operation
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.dt.latency.set_merge
+ description: Time it takes to perform an Update Set operation
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.dt.latency.map_merge
+ description: Time it takes to perform an Update Map operation
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.search.latency.query
+ description: Search query latency
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: median
+ - name: min
+ - name: "95"
+ - name: "99"
+ - name: "999"
+ - name: max
+ - name: riak.search.latency.index
+ description: Time it takes Search to index a new document
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: median
+ - name: min
+ - name: "95"
+ - name: "99"
+ - name: "999"
+ - name: max
+ - name: riak.consistent.latency.get
+ description: Strongly consistent read latency
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.consistent.latency.put
+ description: Strongly consistent write latency
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.vm
+ description: Total processes running in the Erlang VM
+ unit: "total"
+ chart_type: line
+ dimensions:
+ - name: processes
+ - name: riak.vm.memory.processes
+ description: Memory allocated & used by Erlang processes
+ unit: "MB"
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: used
+ - name: riak.kv.siblings_encountered.get
+ description: Number of siblings encountered during GET operations by this node during the past minute
+ unit: "siblings"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.kv.objsize.get
+ description: Object size encountered by this node during the past minute
+ unit: "KB"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.search.vnodeq_size
+ description: Number of unprocessed messages in the vnode message queues of Search on this node in the past minute
+ unit: "messages"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.search.index
+ description: Number of document index errors encountered by Search
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: errors
+ - name: riak.core.protobuf_connections
+ description: Protocol buffer connections by status
+ unit: "connections"
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: riak.core.repairs
+ description: Number of repair operations this node has coordinated
+ unit: "repairs"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: riak.core.fsm_active
+ description: Active finite state machines by kind
+ unit: "fsms"
+ chart_type: line
+ dimensions:
+ - name: get
+ - name: put
+ - name: secondary index
+ - name: list keys
+ - name: riak.core.fsm_rejected
+ description: Finite state machines being rejected by Sidejobs overload protection
+ unit: "fsms"
+ chart_type: line
+ dimensions:
+ - name: get
+ - name: put
+ - name: riak.search.index
+ description: Number of writes to Search failed due to bad data format by reason
+ unit: "writes"
+ chart_type: line
+ dimensions:
+ - name: bad_entry
+ - name: extract_fail
diff --git a/collectors/python.d.plugin/riakkv/riakkv.chart.py b/collectors/python.d.plugin/riakkv/riakkv.chart.py
new file mode 100644
index 00000000..c390c8bc
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/riakkv.chart.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+# Description: riak netdata python.d module
+#
+# See also:
+# https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# Riak updates the metrics at the /stats endpoint every 1 second.
+# Polling with `update_every = 1` could therefore produce jitter in the charts,
+# so the default is set to 2 seconds to prevent it.
+update_every = 2
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = [
+ # Throughput metrics
+ # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#throughput-metrics
+ # Collected in totals.
+ "kv.node_operations", # K/V node operations.
+ "dt.vnode_updates", # Data type vnode updates.
+ "search.queries", # Search queries on the node.
+ "search.documents", # Documents indexed by Search.
+ "consistent.operations", # Consistent node operations.
+
+ # Latency metrics
+ # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#throughput-metrics
+ # Collected for the past minute in milliseconds,
+ # returned from riak in microseconds.
+ "kv.latency.get", # K/V GET FSM traversal latency.
+ "kv.latency.put", # K/V PUT FSM traversal latency.
+ "dt.latency.counter", # Update Counter Data type latency.
+ "dt.latency.set", # Update Set Data type latency.
+ "dt.latency.map", # Update Map Data type latency.
+ "search.latency.query", # Search query latency.
+ "search.latency.index", # Time it takes for search to index a new document.
+ "consistent.latency.get", # Strong consistent read latency.
+ "consistent.latency.put", # Strong consistent write latency.
+
+ # Erlang resource usage metrics
+ # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#erlang-resource-usage-metrics
+ # Processes collected as a gauge,
+ # memory collected as Megabytes, returned as bytes from Riak.
+ "vm.processes", # Number of processes currently running in the Erlang VM.
+ "vm.memory.processes", # Total amount of memory allocated & used for Erlang processes.
+
+ # General Riak Load / Health metrics
+ # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-load-health-metrics
+ # The following are collected by Riak over the past minute:
+ "kv.siblings_encountered.get", # Siblings encountered during GET operations by this node.
+ "kv.objsize.get", # Object size encountered by this node.
+ "search.vnodeq_size", # Number of unprocessed messages in the vnode message queues (Search).
+ # The following are calculated in total, or as gauges:
+ "search.index_errors", # Errors of the search subsystem while indexing documents.
+ "core.pbc", # Number of currently active protocol buffer connections.
+ "core.repairs", # Total read repair operations coordinated by this node.
+ "core.fsm_active", # Active finite state machines by kind.
+ "core.fsm_rejected", # Rejected finite state machines by kind.
+
+ # General Riak Search Load / Health metrics
+ # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-search-load-health-metrics
+ # Reported as counters.
+ "search.errors", # Write and read errors of the Search subsystem.
+]
+
+CHARTS = {
+ # Throughput metrics
+ "kv.node_operations": {
+ "options": [None, "Reads & writes coordinated by this node", "operations/s", "throughput", "riak.kv.throughput",
+ "line"],
+ "lines": [
+ ["node_gets_total", "gets", "incremental"],
+ ["node_puts_total", "puts", "incremental"]
+ ]
+ },
+ "dt.vnode_updates": {
+ "options": [None, "Update operations coordinated by local vnodes by data type", "operations/s", "throughput",
+ "riak.dt.vnode_updates", "line"],
+ "lines": [
+ ["vnode_counter_update_total", "counters", "incremental"],
+ ["vnode_set_update_total", "sets", "incremental"],
+ ["vnode_map_update_total", "maps", "incremental"],
+ ]
+ },
+ "search.queries": {
+ "options": [None, "Search queries on the node", "queries/s", "throughput", "riak.search", "line"],
+ "lines": [
+ ["search_query_throughput_count", "queries", "incremental"]
+ ]
+ },
+ "search.documents": {
+ "options": [None, "Documents indexed by search", "documents/s", "throughput", "riak.search.documents", "line"],
+ "lines": [
+ ["search_index_throughput_count", "indexed", "incremental"]
+ ]
+ },
+ "consistent.operations": {
+ "options": [None, "Consistent node operations", "operations/s", "throughput", "riak.consistent.operations",
+ "line"],
+ "lines": [
+ ["consistent_gets_total", "gets", "incremental"],
+ ["consistent_puts_total", "puts", "incremental"],
+ ]
+ },
+
+ # Latency metrics
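+ # On each line below, the trailing 1, 1000 are the multiplier and divisor:
+ # Riak reports these latencies in microseconds, so dividing by 1000 yields ms.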
+ "kv.latency.get": {
+ "options": [None, "Time between reception of a client GET request and subsequent response to client", "ms",
+ "latency", "riak.kv.latency.get", "line"],
+ "lines": [
+ ["node_get_fsm_time_mean", "mean", "absolute", 1, 1000],
+ ["node_get_fsm_time_median", "median", "absolute", 1, 1000],
+ ["node_get_fsm_time_95", "95", "absolute", 1, 1000],
+ ["node_get_fsm_time_99", "99", "absolute", 1, 1000],
+ ["node_get_fsm_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "kv.latency.put": {
+ "options": [None, "Time between reception of a client PUT request and subsequent response to client", "ms",
+ "latency", "riak.kv.latency.put", "line"],
+ "lines": [
+ ["node_put_fsm_time_mean", "mean", "absolute", 1, 1000],
+ ["node_put_fsm_time_median", "median", "absolute", 1, 1000],
+ ["node_put_fsm_time_95", "95", "absolute", 1, 1000],
+ ["node_put_fsm_time_99", "99", "absolute", 1, 1000],
+ ["node_put_fsm_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "dt.latency.counter": {
+ "options": [None, "Time it takes to perform an Update Counter operation", "ms", "latency",
+ "riak.dt.latency.counter_merge", "line"],
+ "lines": [
+ ["object_counter_merge_time_mean", "mean", "absolute", 1, 1000],
+ ["object_counter_merge_time_median", "median", "absolute", 1, 1000],
+ ["object_counter_merge_time_95", "95", "absolute", 1, 1000],
+ ["object_counter_merge_time_99", "99", "absolute", 1, 1000],
+ ["object_counter_merge_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "dt.latency.set": {
+ "options": [None, "Time it takes to perform an Update Set operation", "ms", "latency",
+ "riak.dt.latency.set_merge", "line"],
+ "lines": [
+ ["object_set_merge_time_mean", "mean", "absolute", 1, 1000],
+ ["object_set_merge_time_median", "median", "absolute", 1, 1000],
+ ["object_set_merge_time_95", "95", "absolute", 1, 1000],
+ ["object_set_merge_time_99", "99", "absolute", 1, 1000],
+ ["object_set_merge_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "dt.latency.map": {
+ "options": [None, "Time it takes to perform an Update Map operation", "ms", "latency",
+ "riak.dt.latency.map_merge", "line"],
+ "lines": [
+ ["object_map_merge_time_mean", "mean", "absolute", 1, 1000],
+ ["object_map_merge_time_median", "median", "absolute", 1, 1000],
+ ["object_map_merge_time_95", "95", "absolute", 1, 1000],
+ ["object_map_merge_time_99", "99", "absolute", 1, 1000],
+ ["object_map_merge_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "search.latency.query": {
+ "options": [None, "Search query latency", "ms", "latency", "riak.search.latency.query", "line"],
+ "lines": [
+ ["search_query_latency_median", "median", "absolute", 1, 1000],
+ ["search_query_latency_min", "min", "absolute", 1, 1000],
+ ["search_query_latency_95", "95", "absolute", 1, 1000],
+ ["search_query_latency_99", "99", "absolute", 1, 1000],
+ ["search_query_latency_999", "999", "absolute", 1, 1000],
+ ["search_query_latency_max", "max", "absolute", 1, 1000],
+ ]
+ },
+ "search.latency.index": {
+ "options": [None, "Time it takes Search to index a new document", "ms", "latency", "riak.search.latency.index",
+ "line"],
+ "lines": [
+ ["search_index_latency_median", "median", "absolute", 1, 1000],
+ ["search_index_latency_min", "min", "absolute", 1, 1000],
+ ["search_index_latency_95", "95", "absolute", 1, 1000],
+ ["search_index_latency_99", "99", "absolute", 1, 1000],
+ ["search_index_latency_999", "999", "absolute", 1, 1000],
+ ["search_index_latency_max", "max", "absolute", 1, 1000],
+ ]
+ },
+
+ # Riak Strong Consistency metrics
+ "consistent.latency.get": {
+ "options": [None, "Strongly consistent read latency", "ms", "latency", "riak.consistent.latency.get", "line"],
+ "lines": [
+ ["consistent_get_time_mean", "mean", "absolute", 1, 1000],
+ ["consistent_get_time_median", "median", "absolute", 1, 1000],
+ ["consistent_get_time_95", "95", "absolute", 1, 1000],
+ ["consistent_get_time_99", "99", "absolute", 1, 1000],
+ ["consistent_get_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+ "consistent.latency.put": {
+ "options": [None, "Strongly consistent write latency", "ms", "latency", "riak.consistent.latency.put", "line"],
+ "lines": [
+ ["consistent_put_time_mean", "mean", "absolute", 1, 1000],
+ ["consistent_put_time_median", "median", "absolute", 1, 1000],
+ ["consistent_put_time_95", "95", "absolute", 1, 1000],
+ ["consistent_put_time_99", "99", "absolute", 1, 1000],
+ ["consistent_put_time_100", "100", "absolute", 1, 1000],
+ ]
+ },
+
+ # BEAM metrics
+ "vm.processes": {
+ "options": [None, "Total processes running in the Erlang VM", "total", "vm", "riak.vm", "line"],
+ "lines": [
+ ["sys_process_count", "processes", "absolute"],
+ ]
+ },
+ "vm.memory.processes": {
+ "options": [None, "Memory allocated & used by Erlang processes", "MB", "vm", "riak.vm.memory.processes",
+ "line"],
+ "lines": [
+ ["memory_processes", "allocated", "absolute", 1, 1024 * 1024],
+ ["memory_processes_used", "used", "absolute", 1, 1024 * 1024]
+ ]
+ },
+
+ # General Riak Load/Health metrics
+ "kv.siblings_encountered.get": {
+ "options": [None, "Number of siblings encountered during GET operations by this node during the past minute",
+ "siblings", "load", "riak.kv.siblings_encountered.get", "line"],
+ "lines": [
+ ["node_get_fsm_siblings_mean", "mean", "absolute"],
+ ["node_get_fsm_siblings_median", "median", "absolute"],
+ ["node_get_fsm_siblings_95", "95", "absolute"],
+ ["node_get_fsm_siblings_99", "99", "absolute"],
+ ["node_get_fsm_siblings_100", "100", "absolute"],
+ ]
+ },
+ "kv.objsize.get": {
+ "options": [None, "Object size encountered by this node during the past minute", "KB", "load",
+ "riak.kv.objsize.get", "line"],
+ "lines": [
+ ["node_get_fsm_objsize_mean", "mean", "absolute", 1, 1024],
+ ["node_get_fsm_objsize_median", "median", "absolute", 1, 1024],
+ ["node_get_fsm_objsize_95", "95", "absolute", 1, 1024],
+ ["node_get_fsm_objsize_99", "99", "absolute", 1, 1024],
+ ["node_get_fsm_objsize_100", "100", "absolute", 1, 1024],
+ ]
+ },
+ "search.vnodeq_size": {
+ "options": [None,
+ "Number of unprocessed messages in the vnode message queues of Search on this node in the past minute",
+ "messages", "load", "riak.search.vnodeq_size", "line"],
+ "lines": [
+ ["riak_search_vnodeq_mean", "mean", "absolute"],
+ ["riak_search_vnodeq_median", "median", "absolute"],
+ ["riak_search_vnodeq_95", "95", "absolute"],
+ ["riak_search_vnodeq_99", "99", "absolute"],
+ ["riak_search_vnodeq_100", "100", "absolute"],
+ ]
+ },
+ "search.index_errors": {
+ "options": [None, "Number of document index errors encountered by Search", "errors", "load",
+ "riak.search.index", "line"],
+ "lines": [
+ ["search_index_fail_count", "errors", "absolute"]
+ ]
+ },
+ "core.pbc": {
+ "options": [None, "Protocol buffer connections by status", "connections", "load",
+ "riak.core.protobuf_connections", "line"],
+ "lines": [
+ ["pbc_active", "active", "absolute"],
+ # ["pbc_connects", "established_pastmin", "absolute"]
+ ]
+ },
+ "core.repairs": {
+ "options": [None, "Number of repair operations this node has coordinated", "repairs", "load",
+ "riak.core.repairs", "line"],
+ "lines": [
+ ["read_repairs", "read", "absolute"]
+ ]
+ },
+ "core.fsm_active": {
+ "options": [None, "Active finite state machines by kind", "fsms", "load", "riak.core.fsm_active", "line"],
+ "lines": [
+ ["node_get_fsm_active", "get", "absolute"],
+ ["node_put_fsm_active", "put", "absolute"],
+ ["index_fsm_active", "secondary index", "absolute"],
+ ["list_fsm_active", "list keys", "absolute"]
+ ]
+ },
+ "core.fsm_rejected": {
+ # Writing "Sidejob's" here seems to cause some weird issues: it results in this chart being rendered in
+ # its own context and additionally, moves the entire Riak graph all the way up to the top of the Netdata
+ # dashboard for some reason.
+ "options": [None, "Finite state machines being rejected by Sidejobs overload protection", "fsms", "load",
+ "riak.core.fsm_rejected", "line"],
+ "lines": [
+ ["node_get_fsm_rejected", "get", "absolute"],
+ ["node_put_fsm_rejected", "put", "absolute"]
+ ]
+ },
+
+ # General Riak Search Load / Health metrics
+ "search.errors": {
+ "options": [None, "Number of writes to Search failed due to bad data format by reason", "writes", "load",
+ "riak.search.index", "line"],
+ "lines": [
+ ["search_index_bad_entry_count", "bad_entry", "absolute"],
+ ["search_index_extract_fail_count", "extract_fail", "absolute"],
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ try:
+ return loads(raw)
+ except (TypeError, ValueError) as err:
+ self.error(err)
+ return None
diff --git a/collectors/python.d.plugin/riakkv/riakkv.conf b/collectors/python.d.plugin/riakkv/riakkv.conf
new file mode 100644
index 00000000..be01c48a
--- /dev/null
+++ b/collectors/python.d.plugin/riakkv/riakkv.conf
@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for riak
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ url : 'http://localhost:8098/stats'
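+
+# A second JOB for a remote Riak node could look like the following
+# (hypothetical host, shown for illustration only):
+#
+# remote:
+#   name : 'remote'
+#   url  : 'http://riak.example.com:8098/stats'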
diff --git a/collectors/python.d.plugin/samba/Makefile.inc b/collectors/python.d.plugin/samba/Makefile.inc
new file mode 100644
index 00000000..230a8ba4
--- /dev/null
+++ b/collectors/python.d.plugin/samba/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += samba/samba.chart.py
+dist_pythonconfig_DATA += samba/samba.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += samba/README.md samba/Makefile.inc
+
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
new file mode 120000
index 00000000..3b63bbab
--- /dev/null
+++ b/collectors/python.d.plugin/samba/README.md
@@ -0,0 +1 @@
+integrations/samba.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/samba/integrations/samba.md b/collectors/python.d.plugin/samba/integrations/samba.md
new file mode 100644
index 00000000..1bd1664e
--- /dev/null
+++ b/collectors/python.d.plugin/samba/integrations/samba.md
@@ -0,0 +1,221 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/samba/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/samba/metadata.yaml"
+sidebar_label: "Samba"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Samba
+
+
+<img src="https://netdata.cloud/img/samba.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: samba
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance metrics of Samba file sharing.
+
+It uses the `smbstatus` command-line tool.
+
+Executed commands:
+
+- `sudo -n smbstatus -P`
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+The module uses `smbstatus`, which can only be executed by `root`. It relies on `sudo` and assumes that `sudo` is configured such that the `netdata` user can execute `smbstatus` as root without a password.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+Once the required permissions are in place, the collector executes `smbstatus -P`.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Samba instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| syscall.rw | sendfile, recvfile | KiB/s |
+| smb2.rw | readout, writein, readin, writeout | KiB/s |
+| smb2.create_close | create, close | operations/s |
+| smb2.get_set_info | getinfo, setinfo | operations/s |
+| smb2.find | find | operations/s |
+| smb2.notify | notify | operations/s |
+| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable the samba collector
+
+The `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d.conf
+```
+Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
+
+
+#### Permissions and programs
+
+To run the collector you need:
+
+- `smbstatus` program
+- `sudo` program
+- `smbd` must be compiled with profiling enabled
+- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
+
+The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
+
+- Add to your `/etc/sudoers` file:
+
+ `which smbstatus` shows the full path to the binary.
+
+ ```bash
+ netdata ALL=(root) NOPASSWD: /path/to/smbstatus
+ ```
+
+- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)
+
+  The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, since without it the `netdata` user cannot execute `smbstatus` via `sudo`.
+
+
+ As the `root` user, do the following:
+
+  ```bash
+ mkdir /etc/systemd/system/netdata.service.d
+ echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+ systemctl daemon-reload
+ systemctl restart netdata.service
+ ```
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/samba.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/samba.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+my_job_name:
+ name: my_name
+ update_every: 1
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin samba debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/samba/metadata.yaml b/collectors/python.d.plugin/samba/metadata.yaml
new file mode 100644
index 00000000..ec31e047
--- /dev/null
+++ b/collectors/python.d.plugin/samba/metadata.yaml
@@ -0,0 +1,205 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: samba
+ monitored_instance:
+ name: Samba
+ link: https://www.samba.org/samba/
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: "samba.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - samba
+ - file sharing
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors the performance metrics of Samba file sharing."
+ method_description: |
+          It uses the `smbstatus` command-line tool.
+
+ Executed commands:
+
+ - `sudo -n smbstatus -P`
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: |
+          The module uses `smbstatus`, which can only be executed by `root`. It relies on `sudo` and assumes that `sudo` is configured such that the `netdata` user can execute `smbstatus` as root without a password.
+ default_behavior:
+ auto_detection:
+ description: "After all the permissions are satisfied, the `smbstatus -P` binary is executed."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable the samba collector
+ description: |
+ The `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+
+ ```bash
+ cd /etc/netdata # Replace this path with your Netdata config directory, if different
+ sudo ./edit-config python.d.conf
+ ```
+ Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
+ - title: Permissions and programs
+ description: |
+ To run the collector you need:
+
+ - `smbstatus` program
+ - `sudo` program
+ - `smbd` must be compiled with profiling enabled
+ - `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
+
+ The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
+
+              - Add to your `/etc/sudoers` file:
+
+ `which smbstatus` shows the full path to the binary.
+
+ ```bash
+ netdata ALL=(root) NOPASSWD: /path/to/smbstatus
+ ```
+
+ - Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)
+
+                The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, since without it the `netdata` user cannot execute `smbstatus` via `sudo`.
+
+
+ As the `root` user, do the following:
+
+                ```bash
+ mkdir /etc/systemd/system/netdata.service.d
+ echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+ systemctl daemon-reload
+ systemctl restart netdata.service
+ ```
+ configuration:
+ file:
+ name: python.d/samba.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ my_job_name:
+ name: my_name
+ update_every: 1
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: syscall.rw
+ description: R/Ws
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: sendfile
+ - name: recvfile
+ - name: smb2.rw
+ description: R/Ws
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: readout
+ - name: writein
+ - name: readin
+ - name: writeout
+ - name: smb2.create_close
+ description: Create/Close
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: create
+ - name: close
+ - name: smb2.get_set_info
+ description: Info
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: getinfo
+ - name: setinfo
+ - name: smb2.find
+ description: Find
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: find
+ - name: smb2.notify
+ description: Notify
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: notify
+ - name: smb2.sm_counters
+ description: Lesser Ops
+ unit: "count"
+ chart_type: stacked
+ dimensions:
+ - name: tcon
+ - name: negprot
+ - name: tdis
+ - name: cancel
+ - name: logoff
+ - name: flush
+ - name: lock
+ - name: keepalive
+ - name: break
+ - name: sessetup
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
new file mode 100644
index 00000000..8eebcd60
--- /dev/null
+++ b/collectors/python.d.plugin/samba/samba.chart.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Description: samba netdata python.d module
+# Author: Christopher Cox <chris_cox@endlessnow.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# The netdata user needs to be able to sudo the smbstatus program
+# without password:
+# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+#
+# This makes calls to smbstatus -P
+#
+# This just looks at a couple of values out of syscall, and some from smb2.
+#
+# The Lesser Ops chart is merely a display of current counter values; they
+# didn't seem to change much. However, if you notice a counter changing a lot
+# there, move it out into its own chart and make it incremental (find and
+# notify are good examples of this).
+
+import re
+import os
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+disabled_by_default = True
+
+update_every = 5
+
+ORDER = [
+ 'syscall_rw',
+ 'smb2_rw',
+ 'smb2_create_close',
+ 'smb2_info',
+ 'smb2_find',
+ 'smb2_notify',
+ 'smb2_sm_count'
+]
+
+CHARTS = {
+ 'syscall_rw': {
+ 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'],
+ 'lines': [
+ ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
+ ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_rw': {
+ 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'],
+ 'lines': [
+ ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
+ ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
+ ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
+ ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
+ ]
+ },
+ 'smb2_create_close': {
+ 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line'],
+ 'lines': [
+ ['smb2_create_count', 'create', 'incremental', 1, 1],
+ ['smb2_close_count', 'close', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_info': {
+ 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line'],
+ 'lines': [
+ ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
+ ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
+ ]
+ },
+ 'smb2_find': {
+ 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line'],
+ 'lines': [
+ ['smb2_find_count', 'find', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_notify': {
+ 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line'],
+ 'lines': [
+ ['smb2_notify_count', 'notify', 'incremental', 1, 1]
+ ]
+ },
+ 'smb2_sm_count': {
+ 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'],
+ 'lines': [
+ ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
+ ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
+ ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
+ ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
+ ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
+ ['smb2_flush_count', 'flush', 'absolute', 1, 1],
+ ['smb2_lock_count', 'lock', 'absolute', 1, 1],
+ ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
+ ['smb2_break_count', 'break', 'absolute', 1, 1],
+ ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
+ ]
+ }
+}
+
+SUDO = 'sudo'
+SMBSTATUS = 'smbstatus'
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
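+        # Extracts "<counter>: <value>" pairs from `smbstatus -P` output. Lines are
+        # assumed to look roughly like "smb2_read_outbytes:   123456" or
+        # "syscall_sendfile_bytes:   789"; exact formatting may vary between Samba
+        # versions, so the regex only anchors on the counter name prefixes.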
+ self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
+
+ def check(self):
+ smbstatus_binary = find_binary(SMBSTATUS)
+ if not smbstatus_binary:
+ self.error("can't locate '{0}' binary".format(SMBSTATUS))
+ return False
+
+ if os.getuid() == 0:
+ self.command = ' '.join([smbstatus_binary, '-P'])
+ return ExecutableService.check(self)
+
+ sudo_binary = find_binary(SUDO)
+ if not sudo_binary:
+ self.error("can't locate '{0}' binary".format(SUDO))
+ return False
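+
+        # Probe the sudoers policy non-interactively: `sudo -n -l <cmd> <args>`
+        # prints the fully qualified command when the invoking user may run it
+        # without a password prompt. Comparing the first output line against
+        # "<smbstatus path> -P" confirms the NOPASSWD rule before starting the job.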
+ command = [sudo_binary, '-n', '-l', smbstatus_binary, '-P']
+ smbstatus = '{0} -P'.format(smbstatus_binary)
+ allowed = self._get_raw_data(command=command)
+ if not (allowed and allowed[0].strip() == smbstatus):
+ self.error("not allowed to run sudo for command '{0}'".format(smbstatus))
+ return False
+ self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
+ return ExecutableService.check(self)
+
+ def _get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+
+ parsed = self.rgx_smb2.findall(' '.join(raw_data))
+
+ return dict(parsed) or None
diff --git a/collectors/python.d.plugin/samba/samba.conf b/collectors/python.d.plugin/samba/samba.conf
new file mode 100644
index 00000000..db15d4e9
--- /dev/null
+++ b/collectors/python.d.plugin/samba/samba.conf
@@ -0,0 +1,60 @@
+# netdata python.d.plugin configuration for samba
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+#     autodetection_retry: 0   # the JOB's re-check interval in seconds
\ No newline at end of file
diff --git a/collectors/python.d.plugin/sensors/Makefile.inc b/collectors/python.d.plugin/sensors/Makefile.inc
new file mode 100644
index 00000000..5fb26e1c
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += sensors/sensors.chart.py
+dist_pythonconfig_DATA += sensors/sensors.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
+
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
new file mode 120000
index 00000000..4e92b088
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -0,0 +1 @@
+integrations/linux_sensors_lm-sensors.md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md b/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md
new file mode 100644
index 00000000..e426c8c8
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md
@@ -0,0 +1,187 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/sensors/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/sensors/metadata.yaml"
+sidebar_label: "Linux Sensors (lm-sensors)"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Linux Sensors (lm-sensors)
+
+
+<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: sensors
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Examine Linux Sensors metrics with Netdata for insights into hardware health and performance.
+
+Enhance your system's reliability with real-time hardware health insights.
+
+
+Reads system sensors information (temperature, voltage, electric current, power, etc.) via [lm-sensors](https://hwmon.wiki.kernel.org/lm_sensors).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The following types of sensors are auto-detected:
+
+- temperature
+- fan
+- voltage
+- current
+- power
+- energy
+- humidity
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per chip
+
+Metrics related to chips. Each chip provides a set of the following metrics, each having the chip name in the metric name as reported by `sensors -u`.
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| sensors.temperature | a dimension per sensor | Celsius |
+| sensors.voltage | a dimension per sensor | Volts |
+| sensors.current | a dimension per sensor | Ampere |
+| sensors.power | a dimension per sensor | Watt |
+| sensors.fan | a dimension per sensor | Rotations/min |
+| sensors.energy | a dimension per sensor | Joule |
+| sensors.humidity | a dimension per sensor | Percent |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/sensors.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/sensors.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| types | The types of sensors to collect. | temperature, fan, voltage, current, power, energy, humidity | yes |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+
+</details>
+
+#### Examples
+
+##### Default
+
+Default configuration.
+
+```yaml
+types:
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+
+```
+
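+##### Chip filtering
+
+The module also accepts a `chips` list (documented in `sensors.conf`), which restricts collection to chips whose names start with one of the given prefixes, as reported by the `sensors` command. A minimal sketch, assuming a `coretemp` chip is present:
+
+```yaml
+types:
+  - temperature
+
+chips:
+  - coretemp
+
+```
+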
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `sensors` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin sensors debug trace
+ ```
+
+### lm-sensors doesn't work on your device
+
+When `lm-sensors` doesn't work on your device (e.g. for RPi temperatures),
+use [the legacy bash collector](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/README.md)
+
+### ACPI ring buffer errors are printed
+
+There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`)
+when ACPI sensors are being accessed. We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
+Please join this discussion for help.
+
+
diff --git a/collectors/python.d.plugin/sensors/metadata.yaml b/collectors/python.d.plugin/sensors/metadata.yaml
new file mode 100644
index 00000000..d7cb2206
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/metadata.yaml
@@ -0,0 +1,184 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: sensors
+ monitored_instance:
+ name: Linux Sensors (lm-sensors)
+ link: https://hwmon.wiki.kernel.org/lm_sensors
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ icon_filename: "microchip.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - sensors
+ - temperature
+ - voltage
+ - current
+ - power
+ - fan
+ - energy
+ - humidity
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Examine Linux Sensors metrics with Netdata for insights into hardware health and performance.
+
+ Enhance your system's reliability with real-time hardware health insights.
+ method_description: >
+ Reads system sensors information (temperature, voltage, electric current, power, etc.) via [lm-sensors](https://hwmon.wiki.kernel.org/lm_sensors).
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+          description: |
+            The following types of sensors are auto-detected:
+
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: python.d/sensors.conf
+ description: ""
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: types
+ description: The types of sensors to collect.
+ default_value: "temperature, fan, voltage, current, power, energy, humidity"
+ required: true
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 1
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: Config
+ list:
+ - name: Default
+ folding:
+ enabled: false
+ description: Default configuration.
+ config: |
+ types:
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+ troubleshooting:
+ problems:
+ list:
+ - name: lm-sensors doesn't work on your device
+ description: |
+ When `lm-sensors` doesn't work on your device (e.g. for RPi temperatures),
+ use [the legacy bash collector](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/README.md)
+ - name: ACPI ring buffer errors are printed
+ description: |
+ There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`)
+ when ACPI sensors are being accessed. We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
+ Please join this discussion for help.
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: chip
+ description: >
+ Metrics related to chips. Each chip provides a set of the following metrics, each having the chip name in the metric name as reported by `sensors -u`.
+ labels: []
+ metrics:
+ - name: sensors.temperature
+ description: Temperature
+ unit: "Celsius"
+ chart_type: line
+ dimensions:
+ - name: a dimension per sensor
+ - name: sensors.voltage
+ description: Voltage
+ unit: "Volts"
+ chart_type: line
+ dimensions:
+ - name: a dimension per sensor
+ - name: sensors.current
+ description: Current
+ unit: "Ampere"
+ chart_type: line
+ dimensions:
+ - name: a dimension per sensor
+ - name: sensors.power
+ description: Power
+ unit: "Watt"
+ chart_type: line
+ dimensions:
+ - name: a dimension per sensor
+ - name: sensors.fan
+ description: Fans speed
+ unit: "Rotations/min"
+ chart_type: line
+ dimensions:
+ - name: a dimension per sensor
+ - name: sensors.energy
+ description: Energy
+ unit: "Joule"
+ chart_type: line
+ dimensions:
+ - name: a dimension per sensor
+ - name: sensors.humidity
+ description: Humidity
+ unit: "Percent"
+ chart_type: line
+ dimensions:
+ - name: a dimension per sensor
diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
new file mode 100644
index 00000000..0d9de375
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+# Description: sensors netdata python.d plugin
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import defaultdict
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from third_party import lm_sensors as sensors
+
+ORDER = [
+ 'temperature',
+ 'fan',
+ 'voltage',
+ 'current',
+ 'power',
+ 'energy',
+ 'humidity',
+]
+
+# This is a prototype of chart definition which is used to dynamically create self.definitions
+CHARTS = {
+ 'temperature': {
+ 'options': [None, 'Temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'voltage': {
+ 'options': [None, 'Voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'current': {
+ 'options': [None, 'Current', 'Ampere', 'current', 'sensors.current', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'power': {
+ 'options': [None, 'Power', 'Watt', 'power', 'sensors.power', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'fan': {
+ 'options': [None, 'Fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ },
+ 'energy': {
+ 'options': [None, 'Energy', 'Joule', 'energy', 'sensors.energy', 'line'],
+ 'lines': [
+ [None, None, 'incremental', 1, 1000]
+ ]
+ },
+ 'humidity': {
+ 'options': [None, 'Humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'],
+ 'lines': [
+ [None, None, 'absolute', 1, 1000]
+ ]
+ }
+}
+
+LIMITS = {
+ 'temperature': [-127, 1000],
+ 'voltage': [-400, 400],
+ 'current': [-127, 127],
+ 'fan': [0, 65535]
+}
+
+TYPE_MAP = {
+ 0: 'voltage',
+ 1: 'fan',
+ 2: 'temperature',
+ 3: 'power',
+ 4: 'energy',
+ 5: 'current',
+ 6: 'humidity',
+ # 7: 'max_main',
+ # 16: 'vid',
+ # 17: 'intrusion',
+ # 18: 'max_other',
+ # 24: 'beep_enable'
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = list()
+ self.definitions = dict()
+ self.chips = configuration.get('chips')
+ self.priority = 60000
+
+ def get_data(self):
+ seen, data = dict(), dict()
+ try:
+ for chip in sensors.ChipIterator():
+ chip_name = sensors.chip_snprintf_name(chip)
+ seen[chip_name] = defaultdict(list)
+
+ for feat in sensors.FeatureIterator(chip):
+ if feat.type not in TYPE_MAP:
+ continue
+
+ feat_type = TYPE_MAP[feat.type]
+ feat_name = str(feat.name.decode())
+ feat_label = sensors.get_label(chip, feat)
+ feat_limits = LIMITS.get(feat_type)
+ sub_feat = next(sensors.SubFeatureIterator(chip, feat)) # current value
+
+ if not sub_feat:
+ continue
+
+ try:
+ v = sensors.get_value(chip, sub_feat.number)
+ except sensors.SensorsError:
+ continue
+
+ if v is None:
+ continue
+
+ seen[chip_name][feat_type].append((feat_name, feat_label))
+
+ if feat_limits and (v < feat_limits[0] or v > feat_limits[1]):
+ continue
+
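+                    # Readings are stored multiplied by 1000; the chart line
+                    # prototypes above use a divisor of 1000, so the dashboard
+                    # shows the original value while the collector keeps integer math.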
+ data[chip_name + '_' + feat_name] = int(v * 1000)
+
+ except sensors.SensorsError as error:
+ self.error(error)
+ return None
+
+ self.update_sensors_charts(seen)
+
+ return data or None
+
+ def update_sensors_charts(self, seen):
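+        # Charts are created dynamically, one per (chip, sensor type) pair, from the
+        # CHARTS prototypes: the chart id is "<chip>_<type>" and each dimension id is
+        # "<chip>_<feature>", matching the keys produced in get_data(). When a
+        # `chips` list is configured, only chips whose names start with one of the
+        # given prefixes are charted.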
+ for chip_name, feat in seen.items():
+ if self.chips and not any([chip_name.startswith(ex) for ex in self.chips]):
+ continue
+
+ for feat_type, sub_feat in feat.items():
+ if feat_type not in ORDER or feat_type not in CHARTS:
+ continue
+
+ chart_id = '{}_{}'.format(chip_name, feat_type)
+ if chart_id in self.charts:
+ continue
+
+ params = [chart_id] + list(CHARTS[feat_type]['options'])
+ new_chart = self.charts.add_chart(params)
+ new_chart.params['priority'] = self.get_chart_priority(feat_type)
+
+ for name, label in sub_feat:
+ lines = list(CHARTS[feat_type]['lines'][0])
+ lines[0] = chip_name + '_' + name
+ lines[1] = label
+ new_chart.add_dimension(lines)
+
+ def check(self):
+ try:
+ sensors.init()
+ except sensors.SensorsError as error:
+ self.error(error)
+ return False
+
+ self.priority = self.charts.priority
+
+ return bool(self.get_data() and self.charts)
+
+ def get_chart_priority(self, feat_type):
+ for i, v in enumerate(ORDER):
+ if v == feat_type:
+ return self.priority + i
+ return self.priority
diff --git a/collectors/python.d.plugin/sensors/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf
new file mode 100644
index 00000000..d3369ba6
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/sensors.conf
@@ -0,0 +1,61 @@
+# netdata python.d.plugin configuration for sensors
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# Limit the number of sensors types.
+# Comment the ones you want to disable.
+# Also, re-arranging this list controls the order of the charts at the
+# netdata dashboard.
+
+types:
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+
+# ----------------------------------------------------------------------
+# Limit the number of sensors chips.
+# Uncomment the first line (chips:) and add chip names below it.
+# Chip names that start with one of the given prefixes will be matched.
+# You can find the chip names using the sensors command.
+
+#chips:
+# - i8k
+# - coretemp
+#
+# chip names can be found using the sensors shell command
+# the prefix is matched (anything that starts like that)
+#
+#----------------------------------------------------------------------
+
diff --git a/collectors/python.d.plugin/smartd_log/Makefile.inc b/collectors/python.d.plugin/smartd_log/Makefile.inc
new file mode 100644
index 00000000..dc1d0f3f
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += smartd_log/smartd_log.chart.py
+dist_pythonconfig_DATA += smartd_log/smartd_log.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += smartd_log/README.md smartd_log/Makefile.inc
+
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
new file mode 120000
index 00000000..63aad6c8
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -0,0 +1 @@
+integrations/s.m.a.r.t..md
\ No newline at end of file
diff --git a/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md b/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md
new file mode 100644
index 00000000..5c5b569e
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md
@@ -0,0 +1,223 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/smartd_log/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/smartd_log/metadata.yaml"
+sidebar_label: "S.M.A.R.T."
+learn_status: "Published"
+learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# S.M.A.R.T.
+
+
+<img src="https://netdata.cloud/img/smart.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: smartd_log
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors HDD/SSD S.M.A.R.T. metrics about drive health and performance.
+
+
+It reads `smartd` log files to collect the metrics.
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+Upon satisfying the prerequisites, the collector will auto-detect metrics if written in either `/var/log/smartd/` or `/var/lib/smartmontools/`.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The metrics listed below are split in terms of availability on device type, SCSI or ATA.
+
+### Per S.M.A.R.T. instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | SCSI | ATA |
+|:------|:----------|:----|:---:|:---:|
+| smartd_log.read_error_rate | a dimension per device | value | | • |
+| smartd_log.seek_error_rate | a dimension per device | value | | • |
+| smartd_log.soft_read_error_rate | a dimension per device | errors | | • |
+| smartd_log.write_error_rate | a dimension per device | value | | • |
+| smartd_log.read_total_err_corrected | a dimension per device | errors | • | |
+| smartd_log.read_total_unc_errors | a dimension per device | errors | • | |
+| smartd_log.write_total_err_corrected | a dimension per device | errors | • | |
+| smartd_log.write_total_unc_errors | a dimension per device | errors | • | |
+| smartd_log.verify_total_err_corrected | a dimension per device | errors | • | |
+| smartd_log.verify_total_unc_errors | a dimension per device | errors | • | |
+| smartd_log.sata_interface_downshift | a dimension per device | events | | • |
+| smartd_log.udma_crc_error_count | a dimension per device | errors | | • |
+| smartd_log.throughput_performance | a dimension per device | value | | • |
+| smartd_log.seek_time_performance | a dimension per device | value | | • |
+| smartd_log.start_stop_count | a dimension per device | events | | • |
+| smartd_log.power_on_hours_count | a dimension per device | hours | | • |
+| smartd_log.power_cycle_count | a dimension per device | events | | • |
+| smartd_log.unexpected_power_loss | a dimension per device | events | | • |
+| smartd_log.spin_up_time | a dimension per device | ms | | • |
+| smartd_log.spin_up_retries | a dimension per device | retries | | • |
+| smartd_log.calibration_retries | a dimension per device | retries | | • |
+| smartd_log.airflow_temperature_celsius | a dimension per device | celsius | | • |
+| smartd_log.temperature_celsius | a dimension per device | celsius | • | • |
+| smartd_log.reallocated_sectors_count | a dimension per device | sectors | | • |
+| smartd_log.reserved_block_count | a dimension per device | percentage | | • |
+| smartd_log.program_fail_count | a dimension per device | errors | | • |
+| smartd_log.erase_fail_count | a dimension per device | failures | | • |
+| smartd_log.wear_leveller_worst_case_erase_count | a dimension per device | erases | | • |
+| smartd_log.unused_reserved_nand_blocks | a dimension per device | blocks | | • |
+| smartd_log.reallocation_event_count | a dimension per device | events | | • |
+| smartd_log.current_pending_sector_count | a dimension per device | sectors | | • |
+| smartd_log.offline_uncorrectable_sector_count | a dimension per device | sectors | | • |
+| smartd_log.percent_lifetime_used | a dimension per device | percentage | | • |
+| smartd_log.media_wearout_indicator | a dimension per device | percentage | | • |
+| smartd_log.nand_writes_1gib | a dimension per device | GiB | | • |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure `smartd` to write attribute information to files.
+
+`smartd` must be running with `-A` option to write `smartd` attribute information to files.
+
+For this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:
+
+```
+# dump smartd attrs info every 600 seconds
+smartd_opts="-A /var/log/smartd/ -i 600"
+```
+
+You may need to create the smartd directory before smartd will write to it:
+
+```sh
+mkdir -p /var/log/smartd
+```
+
+Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See also <https://linux.die.net/man/8/smartd> for more info on the `-A`/`--attributelog=PREFIX` option.
+
+`smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/smartd_log.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/smartd_log.conf
+```
+#### Options
+
+This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| log_path | path to smartd log files. | /var/log/smartd | yes |
+| exclude_disks | Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it. | | no |
+| age | Time in minutes since the last dump to file. | 30 | no |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic configuration example.
+
+```yaml
+custom:
+ name: smartd_log
+ log_path: '/var/log/smartd/'
+
+```
+
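+##### Excluding disks
+
+The `exclude_disks` option from the table above takes space-separated patterns; any drive whose name contains one of them is skipped. A minimal sketch, using hypothetical device names:
+
+```yaml
+custom:
+  name: smartd_log
+  log_path: '/var/log/smartd/'
+  exclude_disks: 'sdb sdc'
+
+```
+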
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `smartd_log` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin smartd_log debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/smartd_log/metadata.yaml b/collectors/python.d.plugin/smartd_log/metadata.yaml
new file mode 100644
index 00000000..d1194969
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/metadata.yaml
@@ -0,0 +1,429 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: smartd_log
+ monitored_instance:
+ name: S.M.A.R.T.
+ link: "https://linux.die.net/man/8/smartd"
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ icon_filename: "smart.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - smart
+ - S.M.A.R.T.
+ - SCSI devices
+ - ATA devices
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors HDD/SSD S.M.A.R.T. metrics about drive health and performance.
+ method_description: |
+ It reads `smartd` log files to collect the metrics.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: Upon satisfying the prerequisites, the collector will auto-detect metrics if written in either `/var/log/smartd/` or `/var/lib/smartmontools/`.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Configure `smartd` to write attribute information to files.
+ description: |
+ `smartd` must be running with `-A` option to write `smartd` attribute information to files.
+
+ For this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:
+
+ ```
+ # dump smartd attrs info every 600 seconds
+ smartd_opts="-A /var/log/smartd/ -i 600"
+ ```
+
+ You may need to create the smartd directory before smartd will write to it:
+
+ ```sh
+ mkdir -p /var/log/smartd
+ ```
+
+              Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See also <https://linux.die.net/man/8/smartd> for more info on the `-A`/`--attributelog=PREFIX` option.
+
+ `smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files.
+ configuration:
+ file:
+ name: "python.d/smartd_log.conf"
+ options:
+ description: |
+          This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
+
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: log_path
+ description: path to smartd log files.
+ default_value: /var/log/smartd
+ required: true
+ - name: exclude_disks
+ description: Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it.
+ default_value: ""
+ required: false
+ - name: age
+ description: Time in minutes since the last dump to file.
+ default_value: 30
+ required: false
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 1
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: >
+ Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic configuration example.
+ folding:
+ enabled: false
+ config: |
+ custom:
+ name: smartd_log
+ log_path: '/var/log/smartd/'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: "The metrics listed below are split in terms of availability on device type, SCSI or ATA."
+ availability:
+ - "SCSI"
+ - "ATA"
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: smartd_log.read_error_rate
+ description: Read Error Rate
+ availability:
+ - ATA
+ unit: "value"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.seek_error_rate
+ description: Seek Error Rate
+ availability:
+ - ATA
+ unit: "value"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.soft_read_error_rate
+ description: Soft Read Error Rate
+ availability:
+ - ATA
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.write_error_rate
+ description: Write Error Rate
+ availability:
+ - ATA
+ unit: "value"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.read_total_err_corrected
+ description: Read Error Corrected
+ availability:
+ - SCSI
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.read_total_unc_errors
+ description: Read Error Uncorrected
+ availability:
+ - SCSI
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.write_total_err_corrected
+ description: Write Error Corrected
+ availability:
+ - SCSI
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.write_total_unc_errors
+ description: Write Error Uncorrected
+ availability:
+ - SCSI
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.verify_total_err_corrected
+ description: Verify Error Corrected
+ availability:
+ - SCSI
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.verify_total_unc_errors
+ description: Verify Error Uncorrected
+ availability:
+ - SCSI
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.sata_interface_downshift
+ description: SATA Interface Downshift
+ availability:
+ - ATA
+ unit: "events"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.udma_crc_error_count
+ description: UDMA CRC Error Count
+ availability:
+ - ATA
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.throughput_performance
+ description: Throughput Performance
+ availability:
+ - ATA
+ unit: "value"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.seek_time_performance
+ description: Seek Time Performance
+ availability:
+ - ATA
+ unit: "value"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.start_stop_count
+ description: Start/Stop Count
+ availability:
+ - ATA
+ unit: "events"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.power_on_hours_count
+ description: Power-On Hours Count
+ availability:
+ - ATA
+ unit: "hours"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.power_cycle_count
+ description: Power Cycle Count
+ availability:
+ - ATA
+ unit: "events"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.unexpected_power_loss
+ description: Unexpected Power Loss
+ availability:
+ - ATA
+ unit: "events"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.spin_up_time
+ description: Spin-Up Time
+ availability:
+ - ATA
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.spin_up_retries
+ description: Spin-up Retries
+ availability:
+ - ATA
+ unit: "retries"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.calibration_retries
+ description: Calibration Retries
+ availability:
+ - ATA
+ unit: "retries"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.airflow_temperature_celsius
+ description: Airflow Temperature Celsius
+ availability:
+ - ATA
+ unit: "celsius"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.temperature_celsius
+ description: Temperature
+ availability:
+ - SCSI
+ - ATA
+ unit: "celsius"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.reallocated_sectors_count
+ description: Reallocated Sectors Count
+ availability:
+ - ATA
+ unit: "sectors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.reserved_block_count
+ description: Reserved Block Count
+ availability:
+ - ATA
+ unit: "percentage"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.program_fail_count
+ description: Program Fail Count
+ availability:
+ - ATA
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.erase_fail_count
+ description: Erase Fail Count
+ availability:
+ - ATA
+ unit: "failures"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.wear_leveller_worst_case_erase_count
+ description: Wear Leveller Worst Case Erase Count
+ availability:
+ - ATA
+ unit: "erases"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.unused_reserved_nand_blocks
+ description: Unused Reserved NAND Blocks
+ availability:
+ - ATA
+ unit: "blocks"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.reallocation_event_count
+ description: Reallocation Event Count
+ availability:
+ - ATA
+ unit: "events"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.current_pending_sector_count
+ description: Current Pending Sector Count
+ availability:
+ - ATA
+ unit: "sectors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.offline_uncorrectable_sector_count
+ description: Offline Uncorrectable Sector Count
+ availability:
+ - ATA
+ unit: "sectors"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.percent_lifetime_used
+ description: Percent Lifetime Used
+ availability:
+ - ATA
+ unit: "percentage"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.media_wearout_indicator
+ description: Media Wearout Indicator
+ availability:
+ - ATA
+ unit: "percentage"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
+ - name: smartd_log.nand_writes_1gib
+ description: NAND Writes
+ availability:
+ - ATA
+ unit: "GiB"
+ chart_type: line
+ dimensions:
+ - name: a dimension per device
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
new file mode 100644
index 00000000..a896164d
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -0,0 +1,790 @@
+# -*- coding: utf-8 -*-
+# Description: smart netdata python.d module
+# Author: ilyam8, vorph1
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+from copy import deepcopy
+from time import time
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from bases.collection import read_last_line
+
+INCREMENTAL = 'incremental'
+ABSOLUTE = 'absolute'
+
+ATA = 'ata'
+SCSI = 'scsi'
+CSV = '.csv'
+
+DEF_RESCAN_INTERVAL = 60
+DEF_AGE = 30
+DEF_PATH = '/var/log/smartd'
+
+ATTR1 = '1'
+ATTR2 = '2'
+ATTR3 = '3'
+ATTR4 = '4'
+ATTR5 = '5'
+ATTR7 = '7'
+ATTR8 = '8'
+ATTR9 = '9'
+ATTR10 = '10'
+ATTR11 = '11'
+ATTR12 = '12'
+ATTR13 = '13'
+ATTR170 = '170'
+ATTR171 = '171'
+ATTR172 = '172'
+ATTR173 = '173'
+ATTR174 = '174'
+ATTR177 = '177'
+ATTR180 = '180'
+ATTR183 = '183'
+ATTR190 = '190'
+ATTR194 = '194'
+ATTR196 = '196'
+ATTR197 = '197'
+ATTR198 = '198'
+ATTR199 = '199'
+ATTR202 = '202'
+ATTR206 = '206'
+ATTR233 = '233'
+ATTR241 = '241'
+ATTR242 = '242'
+ATTR249 = '249'
+ATTR_READ_ERR_COR = 'read-total-err-corrected'
+ATTR_READ_ERR_UNC = 'read-total-unc-errors'
+ATTR_WRITE_ERR_COR = 'write-total-err-corrected'
+ATTR_WRITE_ERR_UNC = 'write-total-unc-errors'
+ATTR_VERIFY_ERR_COR = 'verify-total-err-corrected'
+ATTR_VERIFY_ERR_UNC = 'verify-total-unc-errors'
+ATTR_TEMPERATURE = 'temperature'
+
+RE_ATA = re.compile(
+ r'(\d+);' # attribute
+ r'(\d+);' # normalized value
+ r'(\d+)', # raw value
+ re.X
+)
+
+RE_SCSI = re.compile(
+ '([a-z-]+);' # attribute
+ '([0-9.]+)', # raw value
+ re.X
+)
+
+ORDER = [
+ # errors
+ 'read_error_rate',
+ 'seek_error_rate',
+ 'soft_read_error_rate',
+ 'write_error_rate',
+ 'read_total_err_corrected',
+ 'read_total_unc_errors',
+ 'write_total_err_corrected',
+ 'write_total_unc_errors',
+ 'verify_total_err_corrected',
+ 'verify_total_unc_errors',
+ # external failure
+ 'sata_interface_downshift',
+ 'udma_crc_error_count',
+ # performance
+ 'throughput_performance',
+ 'seek_time_performance',
+ # power
+ 'start_stop_count',
+ 'power_on_hours_count',
+ 'power_cycle_count',
+ 'unexpected_power_loss',
+ # spin
+ 'spin_up_time',
+ 'spin_up_retries',
+ 'calibration_retries',
+ # temperature
+ 'airflow_temperature_celsius',
+ 'temperature_celsius',
+ # wear
+ 'reallocated_sectors_count',
+ 'reserved_block_count',
+ 'program_fail_count',
+ 'erase_fail_count',
+ 'wear_leveller_worst_case_erase_count',
+ 'unused_reserved_nand_blocks',
+ 'reallocation_event_count',
+ 'current_pending_sector_count',
+ 'offline_uncorrectable_sector_count',
+ 'percent_lifetime_used',
+ 'media_wearout_indicator',
+ 'total_lbas_written',
+ 'total_lbas_read',
+]
+
+CHARTS = {
+ 'read_error_rate': {
+ 'options': [None, 'Read Error Rate', 'value', 'errors', 'smartd_log.read_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR1],
+ 'algo': ABSOLUTE,
+ },
+ 'seek_error_rate': {
+ 'options': [None, 'Seek Error Rate', 'value', 'errors', 'smartd_log.seek_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR7],
+ 'algo': ABSOLUTE,
+ },
+ 'soft_read_error_rate': {
+ 'options': [None, 'Soft Read Error Rate', 'errors', 'errors', 'smartd_log.soft_read_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR13],
+ 'algo': INCREMENTAL,
+ },
+ 'write_error_rate': {
+ 'options': [None, 'Write Error Rate', 'value', 'errors', 'smartd_log.write_error_rate', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR206],
+ 'algo': ABSOLUTE,
+ },
+ 'read_total_err_corrected': {
+ 'options': [None, 'Read Error Corrected', 'errors', 'errors', 'smartd_log.read_total_err_corrected', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_READ_ERR_COR],
+ 'algo': INCREMENTAL,
+ },
+ 'read_total_unc_errors': {
+ 'options': [None, 'Read Error Uncorrected', 'errors', 'errors', 'smartd_log.read_total_unc_errors', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_READ_ERR_UNC],
+ 'algo': INCREMENTAL,
+ },
+ 'write_total_err_corrected': {
+ 'options': [None, 'Write Error Corrected', 'errors', 'errors', 'smartd_log.write_total_err_corrected', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_WRITE_ERR_COR],
+ 'algo': INCREMENTAL,
+ },
+ 'write_total_unc_errors': {
+ 'options': [None, 'Write Error Uncorrected', 'errors', 'errors', 'smartd_log.write_total_unc_errors', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_WRITE_ERR_UNC],
+ 'algo': INCREMENTAL,
+ },
+ 'verify_total_err_corrected': {
+ 'options': [None, 'Verify Error Corrected', 'errors', 'errors', 'smartd_log.verify_total_err_corrected',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_VERIFY_ERR_COR],
+ 'algo': INCREMENTAL,
+ },
+ 'verify_total_unc_errors': {
+ 'options': [None, 'Verify Error Uncorrected', 'errors', 'errors', 'smartd_log.verify_total_unc_errors', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR_VERIFY_ERR_UNC],
+ 'algo': INCREMENTAL,
+ },
+ 'sata_interface_downshift': {
+ 'options': [None, 'SATA Interface Downshift', 'events', 'external failure',
+ 'smartd_log.sata_interface_downshift', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR183],
+ 'algo': INCREMENTAL,
+ },
+ 'udma_crc_error_count': {
+ 'options': [None, 'UDMA CRC Error Count', 'errors', 'external failure', 'smartd_log.udma_crc_error_count',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR199],
+ 'algo': INCREMENTAL,
+ },
+ 'throughput_performance': {
+ 'options': [None, 'Throughput Performance', 'value', 'performance', 'smartd_log.throughput_performance',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR2],
+ 'algo': ABSOLUTE,
+ },
+ 'seek_time_performance': {
+ 'options': [None, 'Seek Time Performance', 'value', 'performance', 'smartd_log.seek_time_performance', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR8],
+ 'algo': ABSOLUTE,
+ },
+ 'start_stop_count': {
+ 'options': [None, 'Start/Stop Count', 'events', 'power', 'smartd_log.start_stop_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR4],
+ 'algo': ABSOLUTE,
+ },
+ 'power_on_hours_count': {
+ 'options': [None, 'Power-On Hours Count', 'hours', 'power', 'smartd_log.power_on_hours_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR9],
+ 'algo': ABSOLUTE,
+ },
+ 'power_cycle_count': {
+ 'options': [None, 'Power Cycle Count', 'events', 'power', 'smartd_log.power_cycle_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR12],
+ 'algo': ABSOLUTE,
+ },
+ 'unexpected_power_loss': {
+ 'options': [None, 'Unexpected Power Loss', 'events', 'power', 'smartd_log.unexpected_power_loss', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR174],
+ 'algo': ABSOLUTE,
+ },
+ 'spin_up_time': {
+ 'options': [None, 'Spin-Up Time', 'ms', 'spin', 'smartd_log.spin_up_time', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR3],
+ 'algo': ABSOLUTE,
+ },
+ 'spin_up_retries': {
+ 'options': [None, 'Spin-up Retries', 'retries', 'spin', 'smartd_log.spin_up_retries', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR10],
+ 'algo': INCREMENTAL,
+ },
+ 'calibration_retries': {
+ 'options': [None, 'Calibration Retries', 'retries', 'spin', 'smartd_log.calibration_retries', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR11],
+ 'algo': INCREMENTAL,
+ },
+ 'airflow_temperature_celsius': {
+ 'options': [None, 'Airflow Temperature Celsius', 'celsius', 'temperature',
+ 'smartd_log.airflow_temperature_celsius', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR190],
+ 'algo': ABSOLUTE,
+ },
+ 'temperature_celsius': {
+ 'options': [None, 'Temperature', 'celsius', 'temperature', 'smartd_log.temperature_celsius', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR194, ATTR_TEMPERATURE],
+ 'algo': ABSOLUTE,
+ },
+ 'reallocated_sectors_count': {
+ 'options': [None, 'Reallocated Sectors Count', 'sectors', 'wear', 'smartd_log.reallocated_sectors_count',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR5],
+ 'algo': ABSOLUTE,
+ },
+ 'reserved_block_count': {
+ 'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR170],
+ 'algo': ABSOLUTE,
+ },
+ 'program_fail_count': {
+ 'options': [None, 'Program Fail Count', 'errors', 'wear', 'smartd_log.program_fail_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR171],
+ 'algo': INCREMENTAL,
+ },
+ 'erase_fail_count': {
+ 'options': [None, 'Erase Fail Count', 'failures', 'wear', 'smartd_log.erase_fail_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR172],
+ 'algo': INCREMENTAL,
+ },
+ 'wear_leveller_worst_case_erase_count': {
+ 'options': [None, 'Wear Leveller Worst Case Erase Count', 'erases', 'wear',
+ 'smartd_log.wear_leveller_worst_case_erase_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR173],
+ 'algo': ABSOLUTE,
+ },
+ 'unused_reserved_nand_blocks': {
+ 'options': [None, 'Unused Reserved NAND Blocks', 'blocks', 'wear', 'smartd_log.unused_reserved_nand_blocks',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR180],
+ 'algo': ABSOLUTE,
+ },
+ 'reallocation_event_count': {
+ 'options': [None, 'Reallocation Event Count', 'events', 'wear', 'smartd_log.reallocation_event_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR196],
+ 'algo': INCREMENTAL,
+ },
+ 'current_pending_sector_count': {
+ 'options': [None, 'Current Pending Sector Count', 'sectors', 'wear', 'smartd_log.current_pending_sector_count',
+ 'line'],
+ 'lines': [],
+ 'attrs': [ATTR197],
+ 'algo': ABSOLUTE,
+ },
+ 'offline_uncorrectable_sector_count': {
+ 'options': [None, 'Offline Uncorrectable Sector Count', 'sectors', 'wear',
+ 'smartd_log.offline_uncorrectable_sector_count', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR198],
+ 'algo': ABSOLUTE,
+
+ },
+ 'percent_lifetime_used': {
+ 'options': [None, 'Percent Lifetime Used', 'percentage', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR202],
+ 'algo': ABSOLUTE,
+ },
+ 'media_wearout_indicator': {
+ 'options': [None, 'Media Wearout Indicator', 'percentage', 'wear', 'smartd_log.media_wearout_indicator', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR233, ATTR177],
+ 'algo': ABSOLUTE,
+ },
+ 'nand_writes_1gib': {
+ 'options': [None, 'NAND Writes', 'GiB', 'wear', 'smartd_log.nand_writes_1gib', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR249],
+ 'algo': ABSOLUTE,
+ },
+ 'total_lbas_written': {
+ 'options': [None, 'Total LBAs Written', 'sectors', 'wear', 'smartd_log.total_lbas_written', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR241],
+ 'algo': ABSOLUTE,
+ },
+ 'total_lbas_read': {
+ 'options': [None, 'Total LBAs Read', 'sectors', 'wear', 'smartd_log.total_lbas_read', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR242],
+ 'algo': ABSOLUTE,
+ },
+}
+
+# NOTE: 'parse_temp' decodes ATA 194 raw value. Not heavily tested. Written by @Ferroin
+# C code:
+# https://github.com/smartmontools/smartmontools/blob/master/smartmontools/atacmds.cpp#L2051
+#
+# Calling 'parse_temp' on the raw value will return a 4-tuple, containing
+# * temperature
+# * minimum
+# * maximum
+# * over-temperature count
+# substituting None for values it can't decode.
+#
+# Example:
+# >>> parse_temp(42952491042)
+# >>> (34, 10, 43, None)
+#
+#
+# def check_temp_word(i):
+# if i <= 0x7F:
+# return 0x11
+# elif i <= 0xFF:
+# return 0x01
+# elif 0xFF80 <= i:
+# return 0x10
+# return 0x00
+#
+#
+# def check_temp_range(t, b0, b1):
+# if b0 > b1:
+# t0, t1 = b1, b0
+# else:
+# t0, t1 = b0, b1
+#
+# if all([
+# -60 <= t0,
+# t0 <= t,
+# t <= t1,
+# t1 <= 120,
+# not (t0 == -1 and t1 <= 0)
+# ]):
+# return t0, t1
+# return None, None
+#
+#
+# def parse_temp(raw):
+# byte = list()
+# word = list()
+# for i in range(0, 6):
+# byte.append(0xFF & (raw >> (i * 8)))
+# for i in range(0, 3):
+# word.append(0xFFFF & (raw >> (i * 16)))
+#
+# ctwd = check_temp_word(word[0])
+#
+# if not word[2]:
+# if ctwd and not word[1]:
+# # byte[0] is temp, no other data
+# return byte[0], None, None, None
+#
+# if ctwd and all(check_temp_range(byte[0], byte[2], byte[3])):
+# # byte[0] is temp, byte[2] is max or min, byte[3] is min or max
+# trange = check_temp_range(byte[0], byte[2], byte[3])
+# return byte[0], trange[0], trange[1], None
+#
+# if ctwd and all(check_temp_range(byte[0], byte[1], byte[2])):
+# # byte[0] is temp, byte[1] is max or min, byte[2] is min or max
+# trange = check_temp_range(byte[0], byte[1], byte[2])
+# return byte[0], trange[0], trange[1], None
+#
+# return None, None, None, None
+#
+# if ctwd:
+# if all(
+# [
+# ctwd & check_temp_word(word[1]) & check_temp_word(word[2]) != 0x00,
+# all(check_temp_range(byte[0], byte[2], byte[4])),
+# ]
+# ):
+# # byte[0] is temp, byte[2] is max or min, byte[4] is min or max
+# trange = check_temp_range(byte[0], byte[2], byte[4])
+# return byte[0], trange[0], trange[1], None
+# else:
+# trange = check_temp_range(byte[0], byte[2], byte[3])
+# if word[2] < 0x7FFF and all(trange) and trange[1] >= 40:
+# # byte[0] is temp, byte[2] is max or min, byte[3] is min or max, word[2] is overtemp count
+# return byte[0], trange[0], trange[1], word[2]
+# # no data
+# return None, None, None, None
+
+
+CHARTED_ATTRS = dict((attr, k) for k, v in CHARTS.items() for attr in v['attrs'])
+
+
+class BaseAtaSmartAttribute:
+ def __init__(self, name, normalized_value, raw_value):
+ self.name = name
+ self.normalized_value = normalized_value
+ self.raw_value = raw_value
+
+ def value(self):
+ raise NotImplementedError
+
+
+class AtaRaw(BaseAtaSmartAttribute):
+ def value(self):
+ return self.raw_value
+
+
+class AtaNormalized(BaseAtaSmartAttribute):
+ def value(self):
+ return self.normalized_value
+
+
+class Ata3(BaseAtaSmartAttribute):
+ def value(self):
+ value = int(self.raw_value)
+ # https://github.com/netdata/netdata/issues/5919
+ #
+ # 3;151;38684000679;
+ # 423 (Average 447)
+ # 38684000679 & 0xFFF -> 423
+ # (38684000679 & 0xFFF0000) >> 16 -> 447
+ if value > 1e6:
+ return value & 0xFFF
+ return value
+
+
+class Ata9(BaseAtaSmartAttribute):
+ def value(self):
+ value = int(self.raw_value)
+ if value > 1e6:
+ return value & 0xFFFF
+ return value
+
+
+class Ata190(BaseAtaSmartAttribute):
+ def value(self):
+ return 100 - int(self.normalized_value)
+
+
+class Ata194(BaseAtaSmartAttribute):
+ # https://github.com/netdata/netdata/issues/3041
+ # https://github.com/netdata/netdata/issues/5919
+ #
+ # The low byte is the current temperature, the third lowest is the maximum, and the fifth lowest is the minimum
+ def value(self):
+ value = int(self.raw_value)
+ if value > 1e6:
+ return value & 0xFF
+ return min(int(self.normalized_value), int(self.raw_value))
+
+
+class BaseSCSISmartAttribute:
+ def __init__(self, name, raw_value):
+ self.name = name
+ self.raw_value = raw_value
+
+ def value(self):
+ raise NotImplementedError
+
+
+class SCSIRaw(BaseSCSISmartAttribute):
+ def value(self):
+ return self.raw_value
+
+
+def ata_attribute_factory(value):
+ name = value[0]
+
+ if name == ATTR3:
+ return Ata3(*value)
+ elif name == ATTR9:
+ return Ata9(*value)
+ elif name == ATTR190:
+ return Ata190(*value)
+ elif name == ATTR194:
+ return Ata194(*value)
+ elif name in [
+ ATTR1,
+ ATTR7,
+ ATTR177,
+ ATTR202,
+ ATTR206,
+ ATTR233,
+ ]:
+ return AtaNormalized(*value)
+
+ return AtaRaw(*value)
+
+
+def scsi_attribute_factory(value):
+ return SCSIRaw(*value)
+
+
+def attribute_factory(value):
+ name = value[0]
+ if name.isdigit():
+ return ata_attribute_factory(value)
+ return scsi_attribute_factory(value)
+
+
+def handle_error(*errors):
+ def on_method(method):
+ def on_call(*args):
+ try:
+ return method(*args)
+ except errors:
+ return None
+
+ return on_call
+
+ return on_method
+
+
+class DiskLogFile:
+ def __init__(self, full_path):
+ self.path = full_path
+ self.size = os.path.getsize(full_path)
+
+ @handle_error(OSError)
+ def is_changed(self):
+ return self.size != os.path.getsize(self.path)
+
+ @handle_error(OSError)
+ def is_active(self, current_time, limit):
+ return (current_time - os.path.getmtime(self.path)) / 60 < limit
+
+ @handle_error(OSError)
+ def read(self):
+ self.size = os.path.getsize(self.path)
+ return read_last_line(self.path)
+
+
+class BaseDisk:
+ def __init__(self, name, log_file):
+ self.raw_name = name
+ self.name = re.sub(r'_+', '_', name)
+ self.log_file = log_file
+ self.attrs = list()
+ self.alive = True
+ self.charted = False
+
+ def __eq__(self, other):
+ if isinstance(other, BaseDisk):
+ return self.raw_name == other.raw_name
+ return self.raw_name == other
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def parser(self, data):
+ raise NotImplementedError
+
+ @handle_error(TypeError)
+ def populate_attrs(self):
+ self.attrs = list()
+ line = self.log_file.read()
+ for value in self.parser(line):
+ self.attrs.append(attribute_factory(value))
+
+ return len(self.attrs)
+
+ def data(self):
+ data = dict()
+ for attr in self.attrs:
+ data['{0}_{1}'.format(self.name, attr.name)] = attr.value()
+ return data
+
+
+class ATADisk(BaseDisk):
+ def parser(self, data):
+ return RE_ATA.findall(data)
+
+
+class SCSIDisk(BaseDisk):
+ def parser(self, data):
+ return RE_SCSI.findall(data)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.log_path = configuration.get('log_path', DEF_PATH)
+ self.age = configuration.get('age', DEF_AGE)
+ self.exclude = configuration.get('exclude_disks', str()).split()
+ self.disks = list()
+ self.runs = 0
+ self.do_force_rescan = False
+
+ def check(self):
+ return self.scan() > 0
+
+ def get_data(self):
+ self.runs += 1
+
+ if self.do_force_rescan or self.runs % DEF_RESCAN_INTERVAL == 0:
+ self.cleanup()
+ self.scan()
+ self.do_force_rescan = False
+
+ data = dict()
+
+ for disk in self.disks:
+ if not disk.alive:
+ continue
+
+ if not disk.charted:
+ self.add_disk_to_charts(disk)
+
+ changed = disk.log_file.is_changed()
+
+ if changed is None:
+ disk.alive = False
+ self.do_force_rescan = True
+ continue
+
+ if changed and disk.populate_attrs() is None:
+ disk.alive = False
+ self.do_force_rescan = True
+ continue
+
+ data.update(disk.data())
+
+ return data
+
+ def cleanup(self):
+ current_time = time()
+ for disk in self.disks[:]:
+ if any(
+ [
+ not disk.alive,
+ not disk.log_file.is_active(current_time, self.age),
+ ]
+ ):
+ self.disks.remove(disk.raw_name)
+ self.remove_disk_from_charts(disk)
+
+ def scan(self):
+ self.debug('scanning {0}'.format(self.log_path))
+ current_time = time()
+
+ for full_name in os.listdir(self.log_path):
+ disk = self.create_disk_from_file(full_name, current_time)
+ if not disk:
+ continue
+ self.disks.append(disk)
+
+ return len(self.disks)
+
+ def create_disk_from_file(self, full_name, current_time):
+ if not full_name.endswith(CSV):
+ self.debug('skipping {0}: not a csv file'.format(full_name))
+ return None
+
+ name = os.path.basename(full_name).split('.')[-3]
+ path = os.path.join(self.log_path, full_name)
+
+ if name in self.disks:
+ self.debug('skipping {0}: already in disks'.format(full_name))
+ return None
+
+ if [p for p in self.exclude if p in name]:
+ self.debug('skipping {0}: filtered by `exclude` option'.format(full_name))
+ return None
+
+ if not os.access(path, os.R_OK):
+ self.debug('skipping {0}: not readable'.format(full_name))
+ return None
+
+ if os.path.getsize(path) == 0:
+ self.debug('skipping {0}: zero size'.format(full_name))
+ return None
+
+ if (current_time - os.path.getmtime(path)) / 60 > self.age:
+ self.debug('skipping {0}: hasn\'t been updated in the last {1} minutes'.format(full_name, self.age))
+ return None
+
+ if ATA in full_name:
+ disk = ATADisk(name, DiskLogFile(path))
+ elif SCSI in full_name:
+ disk = SCSIDisk(name, DiskLogFile(path))
+ else:
+ self.debug('skipping {0}: unknown type'.format(full_name))
+ return None
+
+ disk.populate_attrs()
+ if not disk.attrs:
+ self.error('skipping {0}: parsing failed'.format(full_name))
+ return None
+
+ self.debug('added {0}'.format(full_name))
+ return disk
+
+ def add_disk_to_charts(self, disk):
+ if len(self.charts) == 0 or disk.charted:
+ return
+ disk.charted = True
+
+ for attr in disk.attrs:
+ chart_id = CHARTED_ATTRS.get(attr.name)
+
+ if not chart_id or chart_id not in self.charts:
+ continue
+
+ chart = self.charts[chart_id]
+ dim = [
+ '{0}_{1}'.format(disk.name, attr.name),
+ disk.name,
+ CHARTS[chart_id]['algo'],
+ ]
+
+ if dim[0] in self.charts[chart_id].dimensions:
+ chart.hide_dimension(dim[0], reverse=True)
+ else:
+ chart.add_dimension(dim)
+
+ def remove_disk_from_charts(self, disk):
+ if len(self.charts) == 0 or not disk.charted:
+ return
+
+ for attr in disk.attrs:
+ chart_id = CHARTED_ATTRS.get(attr.name)
+
+ if not chart_id or chart_id not in self.charts:
+ continue
+
+ self.charts[chart_id].del_dimension('{0}_{1}'.format(disk.name, attr.name))
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
new file mode 100644
index 00000000..3e81317f
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf
@@ -0,0 +1,76 @@
+# netdata python.d.plugin configuration for smartd log
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, smartd_log also supports the following:
+#
+# log_path: '/path/to/smartd_logs' # path to smartd log files. Default is /var/log/smartd
+# exclude_disks: 'PATTERN1 PATTERN2' # space separated patterns. If the pattern is in the drive name, the module will not collect data for it.
+# age: 30 # time in minutes since the last dump to file. If smartd has not dumped data within this time the job exits.
+#
+# ----------------------------------------------------------------------
+
+custom:
+ name: smartd_log
+ log_path: '/var/log/smartd/'
+
+debian:
+ name: smartd_log
+ log_path: '/var/lib/smartmontools/'
diff --git a/collectors/python.d.plugin/spigotmc/Makefile.inc b/collectors/python.d.plugin/spigotmc/Makefile.inc
new file mode 100644
index 00000000..f9fa8b6b
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += spigotmc/spigotmc.chart.py
+dist_pythonconfig_DATA += spigotmc/spigotmc.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += spigotmc/README.md spigotmc/Makefile.inc
+
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
new file mode 120000
index 00000000..66e5c9c4
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -0,0 +1 @@
+integrations/spigotmc.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md b/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
new file mode 100644
index 00000000..55ec8fa2
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
@@ -0,0 +1,216 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/spigotmc/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/spigotmc/metadata.yaml"
+sidebar_label: "SpigotMC"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Gaming"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SpigotMC
+
+
+<img src="https://netdata.cloud/img/spigot.jfif" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: spigotmc
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors SpigotMC server performance, in the form of average ticks per second, memory utilization, and active users.
+
+
+It sends the `tps`, `list` and `online` commands to the server, and gathers the metrics from the responses.
+
+
+This collector is only supported on the following platforms:
+
+- Linux
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per SpigotMC instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |
+| spigotmc.users | Users | users |
+| spigotmc.mem | used, allocated, max | MiB |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable the Remote Console Protocol
+
+In your SpigotMC server's `server.properties` configuration file, set `enable-rcon` to `true`.
+
+This allows the server to listen for and respond to queries over the RCON protocol.
+
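+As a sketch, the relevant `server.properties` entries could look like this (the password is a placeholder; `25575` is the default RCON port, which this collector also assumes by default):
+
+```
+enable-rcon=true
+rcon.port=25575
+rcon.password=your-rcon-password
+```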
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/spigotmc.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/spigotmc.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| host | The host's IP or hostname to connect to. | localhost | yes |
+| port | The port the remote console is listening on. | 25575 | yes |
+| password | Remote console password if any. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic configuration example.
+
+```yaml
+local:
+ name: local_server
+ host: 127.0.0.1
+ port: 25575
+
+```
+##### Basic Authentication
+
+An example using basic password for authentication with the remote console.
+
+<details><summary>Config</summary>
+
+```yaml
+local:
+ name: local_server_pass
+ host: 127.0.0.1
+ port: 25575
+ password: 'foobar'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+local_server:
+ name: my_local_server
+ host: 127.0.0.1
+ port: 25575
+
+remote_server:
+ name: another_remote_server
+ host: 192.0.2.1
+ port: 25575
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin spigotmc debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/spigotmc/metadata.yaml b/collectors/python.d.plugin/spigotmc/metadata.yaml
new file mode 100644
index 00000000..5dea9f0c
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/metadata.yaml
@@ -0,0 +1,176 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: spigotmc
+ monitored_instance:
+ name: SpigotMC
+ link: ""
+ categories:
+ - data-collection.gaming
+ icon_filename: "spigot.jfif"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - minecraft server
+ - spigotmc server
+ - spigot
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors SpigotMC server performance, in the form of average ticks per second, memory utilization, and active users.
+ method_description: |
+ It sends the `tps`, `list` and `online` commands to the server, and gathers the metrics from the responses.
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: By default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable the Remote Console Protocol
+ description: |
+ In your SpigotMC server's `server.properties` configuration file, set `enable-rcon` to `true`.
+
+ This allows the server to listen for and respond to queries over the RCON protocol.
+ configuration:
+ file:
+ name: "python.d/spigotmc.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 1
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: >
+ Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed
+ running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: host
+ description: The host's IP or hostname to connect to.
+ default_value: localhost
+ required: true
+ - name: port
+ description: The port the remote console is listening on.
+ default_value: 25575
+ required: true
+ - name: password
+ description: Remote console password if any.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic configuration example.
+ folding:
+ enabled: false
+ config: |
+ local:
+ name: local_server
+ host: 127.0.0.1
+ port: 25575
+ - name: Basic Authentication
+ description: An example using basic password for authentication with the remote console.
+ config: |
+ local:
+ name: local_server_pass
+ host: 127.0.0.1
+ port: 25575
+ password: 'foobar'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ local_server:
+ name: my_local_server
+ host: 127.0.0.1
+ port: 25575
+
+ remote_server:
+ name: another_remote_server
+ host: 192.0.2.1
+ port: 25575
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: spigotmc.tps
+ description: Spigot Ticks Per Second
+ unit: "ticks"
+ chart_type: line
+ dimensions:
+ - name: 1 Minute Average
+ - name: 5 Minute Average
+ - name: 15 Minute Average
+ - name: spigotmc.users
+ description: Minecraft Users
+ unit: "users"
+ chart_type: area
+ dimensions:
+ - name: Users
+ - name: spigotmc.mem
+ description: Minecraft Memory Usage
+ unit: "MiB"
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: allocated
+ - name: max
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
new file mode 100644
index 00000000..81370fb4
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
@@ -0,0 +1,184 @@
+# -*- coding: utf-8 -*-
+# Description: spigotmc netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import platform
+import re
+import socket
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from third_party import mcrcon
+
+# Update only every 5 seconds because collection takes in excess of
+# 100ms sometimes, and most people won't care about second-by-second data.
+update_every = 5
+
+PRECISION = 100
+
+COMMAND_TPS = 'tps'
+COMMAND_LIST = 'list'
+COMMAND_ONLINE = 'online'
+
+ORDER = [
+ 'tps',
+ 'mem',
+ 'users',
+]
+
+CHARTS = {
+ 'tps': {
+ 'options': [None, 'Spigot Ticks Per Second', 'ticks', 'spigotmc', 'spigotmc.tps', 'line'],
+ 'lines': [
+ ['tps1', '1 Minute Average', 'absolute', 1, PRECISION],
+ ['tps5', '5 Minute Average', 'absolute', 1, PRECISION],
+ ['tps15', '15 Minute Average', 'absolute', 1, PRECISION]
+ ]
+ },
+ 'users': {
+ 'options': [None, 'Minecraft Users', 'users', 'spigotmc', 'spigotmc.users', 'area'],
+ 'lines': [
+ ['users', 'Users', 'absolute', 1, 1]
+ ]
+ },
+ 'mem': {
+ 'options': [None, 'Minecraft Memory Usage', 'MiB', 'spigotmc', 'spigotmc.mem', 'line'],
+ 'lines': [
+ ['mem_used', 'used', 'absolute', 1, 1],
+ ['mem_alloc', 'allocated', 'absolute', 1, 1],
+ ['mem_max', 'max', 'absolute', 1, 1]
+ ]
+ }
+}
+
+_TPS_REGEX = re.compile(
+ # Examples:
+ # §6TPS from last 1m, 5m, 15m: §a*20.0, §a*20.0, §a*20.0
+ # §6Current Memory Usage: §a936/65536 mb (Max: 65536 mb)
+ r'^.*: .*?' # Message lead-in
+ r'(\d{1,2}\.\d+), .*?' # 1-minute TPS value
+ r'(\d{1,2}\.\d+), .*?' # 5-minute TPS value
+ r'(\d{1,2}\.\d+).*?' # 15-minute TPS value
+ r'(\s.*?(\d+)\/(\d+).*?: (\d+).*)?', # Current Memory Usage / Total Memory (Max Memory)
+ re.MULTILINE
+)
+_LIST_REGEX = re.compile(
+ # Examples:
+ # There are 4 of a max 50 players online: player1, player2, player3, player4
+ # §6There are §c4§6 out of maximum §c50§6 players online.
+ # §6There are §c3§6/§c1§6 out of maximum §c50§6 players online.
+ # §6当前有 §c4§6 个玩家在线,最大在线人数为 §c50§6 个玩家.
+ # §c4§6 人のプレイヤーが接続中です。最大接続可能人数\:§c 50
+ r'[^§](\d+)(?:.*?(?=/).*?[^§](\d+))?', # Current user count.
+ re.X
+)
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.host = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 25575)
+ self.password = self.configuration.get('password', '')
+ self.console = mcrcon.MCRcon()
+ self.alive = True
+
+ def check(self):
+ if platform.system() != 'Linux':
+ self.error('Only supported on Linux.')
+ return False
+ try:
+ self.connect()
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+
+ return self._get_data()
+
+ def connect(self):
+ self.console.connect(self.host, self.port, self.password)
+
+ def reconnect(self):
+ self.error('trying to reconnect.')
+ try:
+ try:
+ self.console.disconnect()
+ except mcrcon.MCRconException:
+ pass
+ self.console.connect(self.host, self.port, self.password)
+ self.alive = True
+ except (mcrcon.MCRconException, socket.error) as err:
+ self.error('Error connecting.')
+ self.error(repr(err))
+ return False
+ return True
+
+ def is_alive(self):
+ if any(
+ [
+ not self.alive,
+ self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1
+ ]
+ ):
+ return self.reconnect()
+ return True
+
+ def _get_data(self):
+ if not self.is_alive():
+ return None
+
+ data = {}
+
+ try:
+ raw = self.console.command(COMMAND_TPS)
+ match = _TPS_REGEX.match(raw)
+ if match:
+ data['tps1'] = int(float(match.group(1)) * PRECISION)
+ data['tps5'] = int(float(match.group(2)) * PRECISION)
+ data['tps15'] = int(float(match.group(3)) * PRECISION)
+ if match.group(4):
+ data['mem_used'] = int(match.group(5))
+ data['mem_alloc'] = int(match.group(6))
+ data['mem_max'] = int(match.group(7))
+ else:
+ self.error('Unable to process TPS values.')
+ if not raw:
+ self.error(
+ "'{0}' command returned no value, make sure you set correct password".format(COMMAND_TPS))
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch TPS values.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+
+ try:
+ raw = self.console.command(COMMAND_LIST)
+ match = _LIST_REGEX.search(raw)
+ if not match:
+ raw = self.console.command(COMMAND_ONLINE)
+ match = _LIST_REGEX.search(raw)
+ if match:
+ users = int(match.group(1))
+ hidden_users = match.group(2)
+ if hidden_users:
+ hidden_users = int(hidden_users)
+ else:
+ hidden_users = 0
+ data['users'] = users + hidden_users
+ else:
+ if not raw:
+ self.error("'{0}' and '{1}' commands returned no value, make sure you set correct password".format(
+ COMMAND_LIST, COMMAND_ONLINE))
+ self.error('Unable to process user counts.')
+ except mcrcon.MCRconException:
+ self.error('Unable to fetch user counts.')
+ except socket.error:
+ self.error('Connection is dead.')
+ self.alive = False
+ return None
+
+ return data
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
new file mode 100644
index 00000000..f0064ea2
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for spigotmc
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, spigotmc supports the following:
+#
+# host: localhost # The host to connect to. Defaults to the local system.
+# port: 25575 # The port the remote console is listening on.
+# password: '' # The remote console password. Must be set correctly.
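+#
+# ----------------------------------------------------------------------
+# An illustrative job definition (host, port and password below are
+# placeholders, adjust them to your own server):
+#
+# local:
+#   name: local_server
+#   host: 127.0.0.1
+#   port: 25575
+#   password: 'your-rcon-password'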
diff --git a/collectors/python.d.plugin/squid/Makefile.inc b/collectors/python.d.plugin/squid/Makefile.inc
new file mode 100644
index 00000000..76ecff81
--- /dev/null
+++ b/collectors/python.d.plugin/squid/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += squid/squid.chart.py
+dist_pythonconfig_DATA += squid/squid.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += squid/README.md squid/Makefile.inc
+
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
new file mode 120000
index 00000000..c4e5a03d
--- /dev/null
+++ b/collectors/python.d.plugin/squid/README.md
@@ -0,0 +1 @@
+integrations/squid.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/squid/integrations/squid.md b/collectors/python.d.plugin/squid/integrations/squid.md
new file mode 100644
index 00000000..6599826d
--- /dev/null
+++ b/collectors/python.d.plugin/squid/integrations/squid.md
@@ -0,0 +1,199 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/squid/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/squid/metadata.yaml"
+sidebar_label: "Squid"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Squid
+
+
+<img src="https://netdata.cloud/img/squid.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: squid
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors statistics about Squid clients and servers, such as bandwidth and requests.
+
+
+It collects metrics from the endpoint where Squid exposes its `counters` data.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, this collector will try to autodetect where Squid presents its `counters` data, by trying various configurations.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Squid instance
+
+These metrics refer to each monitored Squid instance.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| squid.clients_net | in, out, hits | kilobits/s |
+| squid.clients_requests | requests, hits, errors | requests/s |
+| squid.servers_net | in, out | kilobits/s |
+| squid.servers_requests | requests, errors | requests/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure Squid's Cache Manager
+
+Take a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.
+
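+Once access is configured, you can verify that the `counters` page is reachable, for example with `squidclient` (a sketch assuming Squid listens on `127.0.0.1:3128`):
+
+```bash
+squidclient -h 127.0.0.1 -p 3128 mgr:counters
+```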
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/squid.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/squid.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
+| host | The host to connect to. | | yes |
+| port | The port to connect to. | | yes |
+| request | The URL to request from Squid. | | yes |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic configuration example.
+
+```yaml
+example_job_name:
+ name: 'local'
+ host: 'localhost'
+ port: 3128
+ request: 'cache_object://localhost:3128/counters'
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+local_job:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 3128
+ request: 'cache_object://127.0.0.1:3128/counters'
+
+remote_job:
+ name: 'remote'
+ host: '192.0.2.1'
+ port: 3128
+ request: 'cache_object://192.0.2.1:3128/counters'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `squid` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin squid debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/squid/metadata.yaml b/collectors/python.d.plugin/squid/metadata.yaml
new file mode 100644
index 00000000..d0c5b3ec
--- /dev/null
+++ b/collectors/python.d.plugin/squid/metadata.yaml
@@ -0,0 +1,174 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: squid
+ monitored_instance:
+ name: Squid
+ link: "http://www.squid-cache.org/"
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: "squid.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - squid
+ - web delivery
+ - squid caching proxy
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors statistics about the Squid Clients and Servers, like bandwidth and requests.
+ method_description: "It collects metrics from the endpoint where Squid exposes its `counters` data."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "By default, this collector will try to autodetect where Squid presents its `counters` data, by trying various configurations."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Configure Squid's Cache Manager
+ description: |
+ Take a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.
+ configuration:
+ file:
+ name: "python.d/squid.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 1
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: >
+ Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: "local"
+ required: false
+ - name: host
+ description: The host to connect to.
+ default_value: ""
+ required: true
+ - name: port
+ description: The port to connect to.
+ default_value: ""
+ required: true
+ - name: request
+ description: The URL to request from Squid.
+ default_value: ""
+ required: true
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic configuration example.
+ folding:
+ enabled: false
+ config: |
+ example_job_name:
+ name: 'local'
+ host: 'localhost'
+ port: 3128
+ request: 'cache_object://localhost:3128/counters'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ local_job:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 3128
+ request: 'cache_object://127.0.0.1:3128/counters'
+
+ remote_job:
+ name: 'remote'
+ host: '192.0.2.1'
+ port: 3128
+ request: 'cache_object://192.0.2.1:3128/counters'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: Squid instance
+ description: "These metrics refer to each monitored Squid instance."
+ labels: []
+ metrics:
+ - name: squid.clients_net
+ description: Squid Client Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: hits
+ - name: squid.clients_requests
+ description: Squid Client Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: hits
+ - name: errors
+ - name: squid.servers_net
+ description: Squid Server Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: squid.servers_requests
+ description: Squid Server Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: errors
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
new file mode 100644
index 00000000..bcae2d89
--- /dev/null
+++ b/collectors/python.d.plugin/squid/squid.chart.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+# Description: squid netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from bases.FrameworkServices.SocketService import SocketService
+
+ORDER = [
+ 'clients_net',
+ 'clients_requests',
+ 'servers_net',
+ 'servers_requests',
+]
+
+CHARTS = {
+ 'clients_net': {
+ 'options': [None, 'Squid Client Bandwidth', 'kilobits/s', 'clients', 'squid.clients_net', 'area'],
+ 'lines': [
+ ['client_http_kbytes_in', 'in', 'incremental', 8, 1],
+ ['client_http_kbytes_out', 'out', 'incremental', -8, 1],
+ ['client_http_hit_kbytes_out', 'hits', 'incremental', -8, 1]
+ ]
+ },
+ 'clients_requests': {
+ 'options': [None, 'Squid Client Requests', 'requests/s', 'clients', 'squid.clients_requests', 'line'],
+ 'lines': [
+ ['client_http_requests', 'requests', 'incremental'],
+ ['client_http_hits', 'hits', 'incremental'],
+ ['client_http_errors', 'errors', 'incremental', -1, 1]
+ ]
+ },
+ 'servers_net': {
+ 'options': [None, 'Squid Server Bandwidth', 'kilobits/s', 'servers', 'squid.servers_net', 'area'],
+ 'lines': [
+ ['server_all_kbytes_in', 'in', 'incremental', 8, 1],
+ ['server_all_kbytes_out', 'out', 'incremental', -8, 1]
+ ]
+ },
+ 'servers_requests': {
+ 'options': [None, 'Squid Server Requests', 'requests/s', 'servers', 'squid.servers_requests', 'line'],
+ 'lines': [
+ ['server_all_requests', 'requests', 'incremental'],
+ ['server_all_errors', 'errors', 'incremental', -1, 1]
+ ]
+ }
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ SocketService.__init__(self, configuration=configuration, name=name)
+ self._keep_alive = True
+ self.request = ''
+ self.host = 'localhost'
+ self.port = 3128
+ self.order = ORDER
+ self.definitions = CHARTS
+
+ def _get_data(self):
+ """
+ Get data via http request
+ :return: dict
+ """
+ response = self._get_raw_data()
+
+ data = dict()
+ try:
+ raw = ''
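+            # the counters payload is the chunk of the HTTP body that starts with 'sample_time'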
+ for tmp in response.split('\r\n'):
+ if tmp.startswith('sample_time'):
+ raw = tmp
+ break
+
+ if raw.startswith('<'):
+ self.error('invalid data received')
+ return None
+
+ for row in raw.split('\n'):
+ if row.startswith(('client', 'server.all')):
+ tmp = row.split('=')
+ data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
+
+ except (ValueError, AttributeError, TypeError):
+ self.error('invalid data received')
+ return None
+
+ if not data:
+ self.error('no data received')
+ return None
+ return data
+
+ def _check_raw_data(self, data):
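+        # decide whether the buffered reply is complete; manager replies are chunked
+        # HTTP/1.1, so completeness is detected by the terminating zero-length chunk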
+ header = data[:1024].lower()
+
+ if 'connection: keep-alive' in header:
+ self._keep_alive = True
+ else:
+ self._keep_alive = False
+
+ if data[-7:] == '\r\n0\r\n\r\n' and 'transfer-encoding: chunked' in header: # HTTP/1.1 response
+ self.debug('received full response from squid')
+ return True
+
+ self.debug('waiting more data from squid')
+ return False
+
+ def check(self):
+ """
+ Parse essential configuration, autodetect squid configuration (if needed), and check if data is available
+ :return: boolean
+ """
+ self._parse_config()
+ # format request
+ req = self.request.decode()
+ if not req.startswith('GET'):
+ req = 'GET ' + req
+ if not req.endswith(' HTTP/1.1\r\n\r\n'):
+ req += ' HTTP/1.1\r\n\r\n'
+ self.request = req.encode()
+ if self._get_data() is not None:
+ return True
+ else:
+ return False
diff --git a/collectors/python.d.plugin/squid/squid.conf b/collectors/python.d.plugin/squid/squid.conf
new file mode 100644
index 00000000..b90a52c0
--- /dev/null
+++ b/collectors/python.d.plugin/squid/squid.conf
@@ -0,0 +1,167 @@
+# netdata python.d.plugin configuration for squid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, squid also supports the following:
+#
+# host : 'IP or HOSTNAME' # the host to connect to
+# port : PORT # the port to connect to
+# request: 'URL' # the URL to request from squid
+#
+
+# ----------------------------------------------------------------------
+# SQUID CONFIGURATION
+#
+# See:
+# http://wiki.squid-cache.org/Features/CacheManager
+#
+# In short, add to your squid configuration these:
+#
+# http_access allow localhost manager
+# http_access deny manager
+#
+# To remotely monitor a squid:
+#
+# acl managerAdmin src 192.0.2.1
+# http_access allow localhost manager
+# http_access allow managerAdmin manager
+# http_access deny manager
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp3128old:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : 'cache_object://localhost:3128/counters'
+
+tcp8080old:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : 'cache_object://localhost:3128/counters'
+
+tcp3128new:
+ name : 'local'
+ host : 'localhost'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080new:
+ name : 'local'
+ host : 'localhost'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv4
+
+tcp3128oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp8080oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp3128newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv6
+
+tcp3128oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp8080oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp3128newipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080newipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
diff --git a/collectors/python.d.plugin/tomcat/Makefile.inc b/collectors/python.d.plugin/tomcat/Makefile.inc
new file mode 100644
index 00000000..940a7835
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += tomcat/tomcat.chart.py
+dist_pythonconfig_DATA += tomcat/tomcat.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc
+
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
new file mode 120000
index 00000000..997090c3
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -0,0 +1 @@
+integrations/tomcat.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/tomcat/integrations/tomcat.md b/collectors/python.d.plugin/tomcat/integrations/tomcat.md
new file mode 100644
index 00000000..883f29dd
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/integrations/tomcat.md
@@ -0,0 +1,203 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tomcat/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tomcat/metadata.yaml"
+sidebar_label: "Tomcat"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tomcat
+
+
+<img src="https://netdata.cloud/img/tomcat.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: tomcat
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Tomcat metrics about bandwidth, processing time, threads and more.
+
+
+It parses the information provided by the HTTP endpoint `/manager/status` in XML format.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+You need to provide a username and password to access the webserver's status page. Create a separate user with read-only rights for this particular endpoint.
+
+### Default Behavior
+
+#### Auto-Detection
+
+If the Netdata Agent and the Tomcat webserver are on the same host and no configuration is provided, the module attempts to connect to http://localhost:8080/manager/status?XML=true without any credentials, so it will probably fail.
+
+#### Limits
+
+This module does not support SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldn't try to monitor it over a public network (the public internet), because credentials are passed by Netdata over an insecure connection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Tomcat instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| tomcat.accesses | accesses, errors | requests/s |
+| tomcat.bandwidth | sent, received | KiB/s |
+| tomcat.processing_time | processing time | seconds |
+| tomcat.threads | current, busy | current threads |
+| tomcat.jvm | free, eden, survivor, tenured, code cache, compressed, metaspace | MiB |
+| tomcat.jvm_eden | used, committed, max | MiB |
+| tomcat.jvm_survivor | used, committed, max | MiB |
+| tomcat.jvm_tenured | used, committed, max | MiB |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Create a read-only `netdata` user, to monitor the `/status` endpoint.
+
+This is necessary for configuring the collector.
+
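+As a rough sketch only (not taken from this collector's docs, and assuming a stock Tomcat where the `manager-status` role grants read-only access to the status page), such a user could be declared in `conf/tomcat-users.xml`:
+
+```xml
+<!-- hypothetical read-only user for the Netdata collector -->
+<role rolename="manager-status"/>
+<user username="netdata" password="REPLACE_WITH_A_STRONG_PASSWORD" roles="manager-status"/>
+```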
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/tomcat.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/tomcat.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options per job</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | yes |
+| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected. | no | no |
+| pass | A valid password for the user in question. Required if the endpoint is password protected. | no | no |
+| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g. ajp-bio-8009. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://localhost:8080/manager/status?XML=true'
+
+```
+##### Using an IPv4 endpoint
+
+A typical configuration using an IPv4 endpoint
+
+<details><summary>Config</summary>
+
+```yaml
+local_ipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+
+```
+</details>
+
+##### Using an IPv6 endpoint
+
+A typical configuration using an IPv6 endpoint
+
+<details><summary>Config</summary>
+
+```yaml
+local_ipv6:
+ name : 'local'
+ url : 'http://[::1]:8080/manager/status?XML=true'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `tomcat` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin tomcat debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/tomcat/metadata.yaml b/collectors/python.d.plugin/tomcat/metadata.yaml
new file mode 100644
index 00000000..e6852607
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/metadata.yaml
@@ -0,0 +1,200 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: tomcat
+ monitored_instance:
+ name: Tomcat
+ link: "https://tomcat.apache.org/"
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: "tomcat.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - apache
+ - tomcat
+ - webserver
+ - websocket
+ - jakarta
+ - javaEE
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Tomcat metrics about bandwidth, processing time, threads and more.
+ method_description: |
+          It parses the information provided by the HTTP endpoint `/manager/status` in XML format.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+        description: "You need to provide a username and password to access the webserver's status page. Create a separate user with read-only rights for this particular endpoint."
+ default_behavior:
+ auto_detection:
+          description: "If the Netdata Agent and the Tomcat webserver are on the same host and no configuration is provided, the module attempts to connect to http://localhost:8080/manager/status?XML=true without any credentials, so it will probably fail."
+ limits:
+          description: "This module does not support SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldn't try to monitor it over a public network (the public internet), because credentials are passed by Netdata over an insecure connection."
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Create a read-only `netdata` user, to monitor the `/status` endpoint.
+ description: This is necessary for configuring the collector.
+ configuration:
+ file:
+ name: "python.d/tomcat.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+            The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+            Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options per job"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: url
+ description: The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true.
+ default_value: no
+ required: true
+ - name: user
+ description: A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected
+ default_value: no
+ required: false
+ - name: pass
+ description: A valid password for the user in question. Required if the endpoint is password protected
+ default_value: no
+ required: false
+ - name: connector_name
+ description: The connector component that communicates with a web connector via the AJP protocol, e.g ajp-bio-8009
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration
+ config: |
+ localhost:
+ name : 'local'
+ url : 'http://localhost:8080/manager/status?XML=true'
+ - name: Using an IPv4 endpoint
+ description: A typical configuration using an IPv4 endpoint
+ config: |
+ local_ipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+ - name: Using an IPv6 endpoint
+ description: A typical configuration using an IPv6 endpoint
+ config: |
+ local_ipv6:
+ name : 'local'
+ url : 'http://[::1]:8080/manager/status?XML=true'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: tomcat.accesses
+ description: Requests
+ unit: "requests/s"
+ chart_type: area
+ dimensions:
+ - name: accesses
+ - name: errors
+ - name: tomcat.bandwidth
+ description: Bandwidth
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: sent
+ - name: received
+ - name: tomcat.processing_time
+ description: processing time
+ unit: "seconds"
+ chart_type: area
+ dimensions:
+ - name: processing time
+ - name: tomcat.threads
+ description: Threads
+ unit: "current threads"
+ chart_type: area
+ dimensions:
+ - name: current
+ - name: busy
+ - name: tomcat.jvm
+ description: JVM Memory Pool Usage
+ unit: "MiB"
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: eden
+ - name: survivor
+ - name: tenured
+ - name: code cache
+ - name: compressed
+ - name: metaspace
+ - name: tomcat.jvm_eden
+ description: Eden Memory Usage
+ unit: "MiB"
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: committed
+ - name: max
+ - name: tomcat.jvm_survivor
+ description: Survivor Memory Usage
+ unit: "MiB"
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: committed
+ - name: max
+ - name: tomcat.jvm_tenured
+ description: Tenured Memory Usage
+ unit: "MiB"
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: committed
+ - name: max
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
new file mode 100644
index 00000000..90315f8c
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
@@ -0,0 +1,199 @@
+# -*- coding: utf-8 -*-
+# Description: tomcat netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+# Author: Wei He (Wing924)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+import xml.etree.ElementTree as ET
+
+from bases.FrameworkServices.UrlService import UrlService
+
+MiB = 1 << 20
+
+# Regex fix for Tomcat single quote XML attributes
+# affecting Tomcat < 8.5.24 & 9.0.2 running with Java > 9
+# cf. https://bz.apache.org/bugzilla/show_bug.cgi?id=61603
+single_quote_regex = re.compile(r"='([^']+)'([^']+)''")
+
+ORDER = [
+ 'accesses',
+ 'bandwidth',
+ 'processing_time',
+ 'threads',
+ 'jvm',
+ 'jvm_eden',
+ 'jvm_survivor',
+ 'jvm_tenured',
+]
+
+CHARTS = {
+ 'accesses': {
+ 'options': [None, 'Requests', 'requests/s', 'statistics', 'tomcat.accesses', 'area'],
+ 'lines': [
+ ['requestCount', 'accesses', 'incremental'],
+ ['errorCount', 'errors', 'incremental'],
+ ]
+ },
+ 'bandwidth': {
+ 'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'],
+ 'lines': [
+ ['bytesSent', 'sent', 'incremental', 1, 1024],
+ ['bytesReceived', 'received', 'incremental', 1, 1024],
+ ]
+ },
+ 'processing_time': {
+ 'options': [None, 'processing time', 'seconds', 'statistics', 'tomcat.processing_time', 'area'],
+ 'lines': [
+ ['processingTime', 'processing time', 'incremental', 1, 1000]
+ ]
+ },
+ 'threads': {
+ 'options': [None, 'Threads', 'current threads', 'statistics', 'tomcat.threads', 'area'],
+ 'lines': [
+ ['currentThreadCount', 'current', 'absolute'],
+ ['currentThreadsBusy', 'busy', 'absolute']
+ ]
+ },
+ 'jvm': {
+ 'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'],
+ 'lines': [
+ ['free', 'free', 'absolute', 1, MiB],
+ ['eden_used', 'eden', 'absolute', 1, MiB],
+ ['survivor_used', 'survivor', 'absolute', 1, MiB],
+ ['tenured_used', 'tenured', 'absolute', 1, MiB],
+ ['code_cache_used', 'code cache', 'absolute', 1, MiB],
+ ['compressed_used', 'compressed', 'absolute', 1, MiB],
+ ['metaspace_used', 'metaspace', 'absolute', 1, MiB],
+ ]
+ },
+ 'jvm_eden': {
+ 'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'],
+ 'lines': [
+ ['eden_used', 'used', 'absolute', 1, MiB],
+ ['eden_committed', 'committed', 'absolute', 1, MiB],
+ ['eden_max', 'max', 'absolute', 1, MiB]
+ ]
+ },
+ 'jvm_survivor': {
+ 'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'],
+ 'lines': [
+ ['survivor_used', 'used', 'absolute', 1, MiB],
+ ['survivor_committed', 'committed', 'absolute', 1, MiB],
+ ['survivor_max', 'max', 'absolute', 1, MiB],
+ ]
+ },
+ 'jvm_tenured': {
+ 'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'],
+ 'lines': [
+ ['tenured_used', 'used', 'absolute', 1, MiB],
+ ['tenured_committed', 'committed', 'absolute', 1, MiB],
+ ['tenured_max', 'max', 'absolute', 1, MiB]
+ ]
+ }
+}
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
+ self.connector_name = self.configuration.get('connector_name', None)
+ self.parse = self.xml_parse
+
+ def xml_parse(self, data):
+ try:
+ return ET.fromstring(data)
+ except ET.ParseError:
+ self.debug('%s is not a valid XML page. Please add "?XML=true" to tomcat status page.' % self.url)
+ return None
+
+ def xml_single_quote_fix_parse(self, data):
+ data = single_quote_regex.sub(r"='\g<1>\g<2>'", data)
+ return self.xml_parse(data)
+
+ def check(self):
+ self._manager = self._build_manager()
+
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return False
+
+ if single_quote_regex.search(raw_data):
+ self.warning('Tomcat status page is returning invalid single quote XML, please consider upgrading '
+ 'your Tomcat installation. See https://bz.apache.org/bugzilla/show_bug.cgi?id=61603')
+ self.parse = self.xml_single_quote_fix_parse
+
+ return self.parse(raw_data) is not None
+
+ def _get_data(self):
+ """
+ Format data received from http request
+ :return: dict
+ """
+ data = None
+ raw_data = self._get_raw_data()
+ if raw_data:
+ xml = self.parse(raw_data)
+ if xml is None:
+ return None
+
+ data = {}
+
+ jvm = xml.find('jvm')
+
+ connector = None
+ if self.connector_name:
+ for conn in xml.findall('connector'):
+ if self.connector_name in conn.get('name'):
+ connector = conn
+ break
+ else:
+ connector = xml.find('connector')
+
+ memory = jvm.find('memory')
+ data['free'] = memory.get('free')
+ data['total'] = memory.get('total')
+
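+            # memory pool names depend on the JVM garbage collector in use
+            # (e.g. 'Eden Space' vs 'PS Eden Space'), so match them by substring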
+ for pool in jvm.findall('memorypool'):
+ name = pool.get('name')
+ if 'Eden Space' in name:
+ data['eden_used'] = pool.get('usageUsed')
+ data['eden_committed'] = pool.get('usageCommitted')
+ data['eden_max'] = pool.get('usageMax')
+ elif 'Survivor Space' in name:
+ data['survivor_used'] = pool.get('usageUsed')
+ data['survivor_committed'] = pool.get('usageCommitted')
+ data['survivor_max'] = pool.get('usageMax')
+ elif 'Tenured Gen' in name or 'Old Gen' in name:
+ data['tenured_used'] = pool.get('usageUsed')
+ data['tenured_committed'] = pool.get('usageCommitted')
+ data['tenured_max'] = pool.get('usageMax')
+ elif name == 'Code Cache':
+ data['code_cache_used'] = pool.get('usageUsed')
+ data['code_cache_committed'] = pool.get('usageCommitted')
+ data['code_cache_max'] = pool.get('usageMax')
+ elif name == 'Compressed':
+ data['compressed_used'] = pool.get('usageUsed')
+ data['compressed_committed'] = pool.get('usageCommitted')
+ data['compressed_max'] = pool.get('usageMax')
+ elif name == 'Metaspace':
+ data['metaspace_used'] = pool.get('usageUsed')
+ data['metaspace_committed'] = pool.get('usageCommitted')
+ data['metaspace_max'] = pool.get('usageMax')
+
+ if connector is not None:
+ thread_info = connector.find('threadInfo')
+ data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy')
+ data['currentThreadCount'] = thread_info.get('currentThreadCount')
+
+ request_info = connector.find('requestInfo')
+ data['processingTime'] = request_info.get('processingTime')
+ data['requestCount'] = request_info.get('requestCount')
+ data['errorCount'] = request_info.get('errorCount')
+ data['bytesReceived'] = request_info.get('bytesReceived')
+ data['bytesSent'] = request_info.get('bytesSent')
+
+ return data or None
diff --git a/collectors/python.d.plugin/tomcat/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
new file mode 100644
index 00000000..009591bd
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/tomcat.conf
@@ -0,0 +1,89 @@
+# netdata python.d.plugin configuration for tomcat
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, tomcat also supports the following:
+#
+#     url: 'URL'       # the URL to fetch tomcat's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# if you have multiple connectors, the following are supported:
+#
+# connector_name: 'ajp-bio-8009' # default is null, which use first connector in status XML
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8080/manager/status?XML=true'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]:8080/manager/status?XML=true'
diff --git a/collectors/python.d.plugin/tor/Makefile.inc b/collectors/python.d.plugin/tor/Makefile.inc
new file mode 100644
index 00000000..5a45f9b7
--- /dev/null
+++ b/collectors/python.d.plugin/tor/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += tor/tor.chart.py
+dist_pythonconfig_DATA += tor/tor.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += tor/README.md tor/Makefile.inc
+
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
new file mode 120000
index 00000000..7c20cd40
--- /dev/null
+++ b/collectors/python.d.plugin/tor/README.md
@@ -0,0 +1 @@
+integrations/tor.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/tor/integrations/tor.md b/collectors/python.d.plugin/tor/integrations/tor.md
new file mode 100644
index 00000000..0e57fa79
--- /dev/null
+++ b/collectors/python.d.plugin/tor/integrations/tor.md
@@ -0,0 +1,197 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tor/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tor/metadata.yaml"
+sidebar_label: "Tor"
+learn_status: "Published"
+learn_rel_path: "Data Collection/VPNs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tor
+
+
+<img src="https://netdata.cloud/img/tor.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: tor
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Tor bandwidth traffic.
+
+It connects to the Tor control port to collect traffic statistics.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is provided, the collector will try to connect to 127.0.0.1:9051 to detect a running tor instance.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Tor instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| tor.traffic | read, write | KiB/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Required python module
+
+The `stem` python library needs to be installed.
+
+
+#### Required Tor configuration
+
+Add to `/etc/tor/torrc`:
+
+`ControlPort 9051`
+
+For more options, please read the Tor manual.
+
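+If you set the `password` option in the collector job, the control port must also be protected with a matching hashed password. A hedged `torrc` sketch, assuming the hash is generated with `tor --hash-password`:
+
+```
+ControlPort 9051
+# optional: only needed when the collector job sets a password
+HashedControlPassword <paste the output of: tor --hash-password "your_password">
+```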
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/tor.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/tor.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| control_addr | Tor control IP address | 127.0.0.1 | no |
+| control_port | Tor control port. Can be either a tcp port, or a path to a socket file. | 9051 | no |
+| password | Tor control password | | no |
+
+</details>
+
+#### Examples
+
+##### Local TCP
+
+A basic TCP configuration. `control_addr` is omitted and defaults to `127.0.0.1`.
+
+<details><summary>Config</summary>
+
+```yaml
+local_tcp:
+ name: 'local'
+ control_port: 9051
+ password: <password> # if required
+
+```
+</details>
+
+##### Local socket
+
+A basic local socket configuration
+
+<details><summary>Config</summary>
+
+```yaml
+local_socket:
+ name: 'local'
+ control_port: '/var/run/tor/control'
+ password: <password> # if required
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `tor` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin tor debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/tor/metadata.yaml b/collectors/python.d.plugin/tor/metadata.yaml
new file mode 100644
index 00000000..8647eca2
--- /dev/null
+++ b/collectors/python.d.plugin/tor/metadata.yaml
@@ -0,0 +1,143 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: tor
+ monitored_instance:
+ name: Tor
+ link: 'https://www.torproject.org/'
+ categories:
+ - data-collection.vpns
+ icon_filename: 'tor.svg'
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ''
+ keywords:
+ - tor
+ - traffic
+ - vpn
+ most_popular: false
+ overview:
+ data_collection:
+        metrics_description: 'This collector monitors Tor bandwidth traffic.'
+ method_description: 'It connects to the Tor control port to collect traffic statistics.'
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ''
+ default_behavior:
+ auto_detection:
+ description: 'If no configuration is provided the collector will try to connect to 127.0.0.1:9051 to detect a running tor instance.'
+ limits:
+ description: ''
+ performance_impact:
+ description: ''
+ setup:
+ prerequisites:
+ list:
+ - title: 'Required python module'
+ description: |
+ The `stem` python library needs to be installed.
+ - title: 'Required Tor configuration'
+ description: |
+ Add to /etc/tor/torrc:
+
+ ControlPort 9051
+
+ For more options please read the manual.
+ configuration:
+ file:
+ name: python.d/tor.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ''
+ required: false
+ - name: control_addr
+ description: Tor control IP address
+ default_value: 127.0.0.1
+ required: false
+ - name: control_port
+ description: Tor control port. Can be either a tcp port, or a path to a socket file.
+ default_value: 9051
+ required: false
+ - name: password
+ description: Tor control password
+ default_value: ''
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Local TCP
+              description: A basic TCP configuration. `control_addr` is omitted and defaults to `127.0.0.1`.
+ config: |
+ local_tcp:
+ name: 'local'
+ control_port: 9051
+ password: <password> # if required
+ - name: Local socket
+ description: A basic local socket configuration
+ config: |
+ local_socket:
+ name: 'local'
+ control_port: '/var/run/tor/control'
+ password: <password> # if required
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: tor.traffic
+ description: Tor Traffic
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py
new file mode 100644
index 00000000..f7bc2d79
--- /dev/null
+++ b/collectors/python.d.plugin/tor/tor.chart.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# Description: tor netdata python.d module
+# Author: Federico Ceratto <federico.ceratto@gmail.com>
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+try:
+ import stem
+ import stem.connection
+ import stem.control
+
+ STEM_AVAILABLE = True
+except ImportError:
+ STEM_AVAILABLE = False
+
+DEF_PORT = 'default'
+DEF_ADDR = '127.0.0.1'
+
+ORDER = [
+ 'traffic',
+]
+
+CHARTS = {
+ 'traffic': {
+ 'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'],
+ 'lines': [
+ ['read', 'read', 'incremental', 1, 1024],
+ ['write', 'write', 'incremental', 1, -1024],
+ ]
+ }
+}
+
+
+class Service(SimpleService):
+ """Provide netdata service for Tor"""
+
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.port = self.configuration.get('control_port', DEF_PORT)
+ self.addr = self.configuration.get('control_addr', DEF_ADDR)
+ self.password = self.configuration.get('password')
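+        # a non-numeric control_port is treated as the path to Tor's control socket file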
+ self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
+ self.conn = None
+ self.alive = False
+
+ def check(self):
+ if not STEM_AVAILABLE:
+ self.error('the stem library is missing')
+ return False
+
+ return self.connect()
+
+ def get_data(self):
+ if not self.alive and not self.reconnect():
+ return None
+
+ data = dict()
+
+ try:
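+            # traffic/read and traffic/written are cumulative byte counters reported by the
+            # Tor control protocol, hence the 'incremental' dimensions in the chart definition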
+ data['read'] = self.conn.get_info('traffic/read')
+ data['write'] = self.conn.get_info('traffic/written')
+ except stem.ControllerError as error:
+ self.debug(error)
+ self.alive = False
+
+ return data or None
+
+ def authenticate(self):
+ try:
+ self.conn.authenticate(password=self.password)
+ except stem.connection.AuthenticationFailure as error:
+ self.error('authentication error: {0}'.format(error))
+ return False
+ return True
+
+ def connect_via_port(self):
+ try:
+ self.conn = stem.control.Controller.from_port(address=self.addr, port=self.port)
+ except (stem.SocketError, ValueError) as error:
+ self.error(error)
+
+ def connect_via_socket(self):
+ try:
+ self.conn = stem.control.Controller.from_socket_file(path=self.port)
+ except (stem.SocketError, ValueError) as error:
+ self.error(error)
+
+ def connect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+
+ if self.use_socket:
+ self.connect_via_socket()
+ else:
+ self.connect_via_port()
+
+ if self.conn and self.authenticate():
+ self.alive = True
+
+ return self.alive
+
+ def reconnect(self):
+ return self.connect()
diff --git a/collectors/python.d.plugin/tor/tor.conf b/collectors/python.d.plugin/tor/tor.conf
new file mode 100644
index 00000000..c7c98dc0
--- /dev/null
+++ b/collectors/python.d.plugin/tor/tor.conf
@@ -0,0 +1,81 @@
+# netdata python.d.plugin configuration for tor
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, tor plugin also supports the following:
+#
+# control_addr: 'address' # tor control IP address (defaults to '127.0.0.1')
+# control_port: 'port' # tor control port
+# password: 'password' # tor control password
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+# local_tcp:
+# name: 'local'
+# control_port: 9051
+# control_addr: 127.0.0.1
+# password: <password>
+#
+# local_socket:
+# name: 'local'
+# control_port: '/var/run/tor/control'
+# password: <password>
diff --git a/collectors/python.d.plugin/traefik/Makefile.inc b/collectors/python.d.plugin/traefik/Makefile.inc
new file mode 100644
index 00000000..926d56dd
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += traefik/traefik.chart.py
+dist_pythonconfig_DATA += traefik/traefik.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += traefik/README.md traefik/Makefile.inc
+
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
new file mode 100644
index 00000000..40ed24f0
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -0,0 +1,98 @@
+<!--
+title: "Traefik monitoring with Netdata"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/traefik/README.md"
+sidebar_label: "traefik-python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Integrations/Monitor/Webapps"
+-->
+
+# Traefik collector
+
+Uses the `health` API to provide statistics.
+
+It produces:
+
+1. **Responses** by statuses
+
+ - success (1xx, 2xx, 304)
+ - error (5xx)
+ - redirect (3xx except 304)
+ - bad (4xx)
+ - other (all other responses)
+
+2. **Responses** by codes
+
+ - 2xx (successful)
+ - 5xx (internal server errors)
+ - 3xx (redirect)
+ - 4xx (bad)
+ - 1xx (informational)
+   - other (non-standard responses)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Requests**/s
+
+ - request statistics
+
+5. **Total response time**
+
+ - sum of all response time
+
+6. **Average response time**
+
+7. **Average response time per iteration**
+
+8. **Uptime**
+
+ - Traefik server uptime
+
+## Configuration
+
+Edit the `python.d/traefik.conf` configuration file using `edit-config` from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically
+at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/traefik.conf
+```
+
+It only needs the `url` of the server's `health` endpoint.
+
+Here is an example for a local server:
+
+```yaml
+update_every: 1
+priority: 60000
+
+local:
+ url: 'http://localhost:8080/health'
+```
+
+Without configuration, the module attempts to connect to `http://localhost:8080/health`.
+
+
+
+
+### Troubleshooting
+
+To troubleshoot issues with the `traefik` module, run the `python.d.plugin` with the debug option enabled. The
+output will give you the output of the data collection job or error messages on why the collector isn't working.
+
+First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's
+not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the
+plugin's directory, switch to the `netdata` user.
+
+```bash
+cd /usr/libexec/netdata/plugins.d/
+sudo su -s /bin/bash netdata
+```
+
+Now you can manually run the `traefik` module in debug mode:
+
+```bash
+./python.d.plugin traefik debug trace
+```
+
diff --git a/collectors/python.d.plugin/traefik/metadata.yaml b/collectors/python.d.plugin/traefik/metadata.yaml
new file mode 100644
index 00000000..dcfb098a
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/metadata.yaml
@@ -0,0 +1,125 @@
+# This collector will not appear in documentation, as the go version is preferred,
+# https://github.com/netdata/go.d.plugin/blob/master/modules/traefik/README.md
+#
+# meta:
+# plugin_name: python.d.plugin
+# module_name: traefik
+# monitored_instance:
+# name: python.d traefik
+# link: ''
+# categories: []
+# icon_filename: ''
+# related_resources:
+# integrations:
+# list: []
+# info_provided_to_referring_integrations:
+# description: ''
+# keywords: []
+# most_popular: false
+# overview:
+# data_collection:
+# metrics_description: ''
+# method_description: ''
+# supported_platforms:
+# include: []
+# exclude: []
+# multi_instance: true
+# additional_permissions:
+# description: ''
+# default_behavior:
+# auto_detection:
+# description: ''
+# limits:
+# description: ''
+# performance_impact:
+# description: ''
+# setup:
+# prerequisites:
+# list: []
+# configuration:
+# file:
+# name: ''
+# description: ''
+# options:
+# description: ''
+# folding:
+# title: ''
+# enabled: true
+# list: []
+# examples:
+# folding:
+# enabled: true
+# title: ''
+# list: []
+# troubleshooting:
+# problems:
+# list: []
+# alerts: []
+# metrics:
+# folding:
+# title: Metrics
+# enabled: false
+# description: ""
+# availability: []
+# scopes:
+# - name: global
+# description: ""
+# labels: []
+# metrics:
+# - name: traefik.response_statuses
+# description: Response statuses
+# unit: "requests/s"
+# chart_type: stacked
+# dimensions:
+# - name: success
+# - name: error
+# - name: redirect
+# - name: bad
+# - name: other
+# - name: traefik.response_codes
+# description: Responses by codes
+# unit: "requests/s"
+# chart_type: stacked
+# dimensions:
+# - name: 2xx
+# - name: 5xx
+# - name: 3xx
+# - name: 4xx
+# - name: 1xx
+# - name: other
+# - name: traefik.detailed_response_codes
+# description: Detailed response codes
+# unit: "requests/s"
+# chart_type: stacked
+# dimensions:
+# - name: a dimension for each response code family
+# - name: traefik.requests
+# description: Requests
+# unit: "requests/s"
+# chart_type: line
+# dimensions:
+# - name: requests
+# - name: traefik.total_response_time
+# description: Total response time
+# unit: "seconds"
+# chart_type: line
+# dimensions:
+# - name: response
+# - name: traefik.average_response_time
+# description: Average response time
+# unit: "milliseconds"
+# chart_type: line
+# dimensions:
+# - name: response
+# - name: traefik.average_response_time_per_iteration
+# description: Average response time per iteration
+# unit: "milliseconds"
+# chart_type: line
+# dimensions:
+# - name: response
+# - name: traefik.uptime
+# description: Uptime
+# unit: "seconds"
+# chart_type: line
+# dimensions:
+# - name: uptime
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
new file mode 100644
index 00000000..5a498467
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+# Description: traefik netdata python.d module
+# Author: Alexandre Menezes (@ale_menezes)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import defaultdict
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = [
+ 'response_statuses',
+ 'response_codes',
+ 'detailed_response_codes',
+ 'requests',
+ 'total_response_time',
+ 'average_response_time',
+ 'average_response_time_per_iteration',
+ 'uptime'
+]
+
+CHARTS = {
+ 'response_statuses': {
+ 'options': [None, 'Response statuses', 'requests/s', 'responses', 'traefik.response_statuses', 'stacked'],
+ 'lines': [
+ ['successful_requests', 'success', 'incremental'],
+ ['server_errors', 'error', 'incremental'],
+ ['redirects', 'redirect', 'incremental'],
+ ['bad_requests', 'bad', 'incremental'],
+ ['other_requests', 'other', 'incremental']
+ ]
+ },
+ 'response_codes': {
+ 'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'],
+ 'lines': [
+ ['2xx', None, 'incremental'],
+ ['5xx', None, 'incremental'],
+ ['3xx', None, 'incremental'],
+ ['4xx', None, 'incremental'],
+ ['1xx', None, 'incremental'],
+ ['other', None, 'incremental']
+ ]
+ },
+ 'detailed_response_codes': {
+ 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes',
+ 'stacked'],
+ 'lines': []
+ },
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'],
+ 'lines': [
+ ['total_count', 'requests', 'incremental']
+ ]
+ },
+ 'total_response_time': {
+ 'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'],
+ 'lines': [
+ ['total_response_time_sec', 'response', 'absolute', 1, 10000]
+ ]
+ },
+ 'average_response_time': {
+ 'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'],
+ 'lines': [
+ ['average_response_time_sec', 'response', 'absolute', 1, 1000]
+ ]
+ },
+ 'average_response_time_per_iteration': {
+ 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings',
+ 'traefik.average_response_time_per_iteration', 'line'],
+ 'lines': [
+ ['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000]
+ ]
+ },
+ 'uptime': {
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'],
+ 'lines': [
+ ['uptime_sec', 'uptime', 'absolute']
+ ]
+ }
+}
+
+HEALTH_STATS = [
+ 'uptime_sec',
+ 'average_response_time_sec',
+ 'total_response_time_sec',
+ 'total_count',
+ 'total_status_code_count'
+]
+
+
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url', 'http://localhost:8080/health')
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.last_total_response_time = 0
+ self.last_total_count = 0
+ self.data = {
+ 'successful_requests': 0,
+ 'redirects': 0,
+ 'bad_requests': 0,
+ 'server_errors': 0,
+ 'other_requests': 0,
+ '1xx': 0,
+ '2xx': 0,
+ '3xx': 0,
+ '4xx': 0,
+ '5xx': 0,
+ 'other': 0,
+ 'average_response_time_per_iteration_sec': 0,
+ }
+
+ def _get_data(self):
+ data = self._get_raw_data()
+
+ if not data:
+ return None
+
+ data = loads(data)
+
+ self.get_data_per_code_status(raw_data=data)
+
+ self.get_data_per_code_family(raw_data=data)
+
+ self.get_data_per_code(raw_data=data)
+
+ self.data.update(fetch_data_(raw_data=data, metrics=HEALTH_STATS))
+
+ self.data['average_response_time_sec'] *= 1000000
+ self.data['total_response_time_sec'] *= 10000
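+        # Per-iteration average: delta of the total response time divided by the delta of the
+        # request count since the previous poll (reported as 0 when no new requests were served).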
+ if data['total_count'] != self.last_total_count:
+ self.data['average_response_time_per_iteration_sec'] = \
+ (data['total_response_time_sec'] - self.last_total_response_time) * \
+ 1000000 / (data['total_count'] - self.last_total_count)
+ else:
+ self.data['average_response_time_per_iteration_sec'] = 0
+ self.last_total_response_time = data['total_response_time_sec']
+ self.last_total_count = data['total_count']
+
+ return self.data or None
+
+ def get_data_per_code_status(self, raw_data):
+ data = defaultdict(int)
+ for code, value in raw_data['total_status_code_count'].items():
+ code_prefix = code[0]
+ if code_prefix == '1' or code_prefix == '2' or code == '304':
+ data['successful_requests'] += value
+ elif code_prefix == '3':
+ data['redirects'] += value
+ elif code_prefix == '4':
+ data['bad_requests'] += value
+ elif code_prefix == '5':
+ data['server_errors'] += value
+ else:
+ data['other_requests'] += value
+ self.data.update(data)
+
+ def get_data_per_code_family(self, raw_data):
+ data = defaultdict(int)
+ for code, value in raw_data['total_status_code_count'].items():
+ code_prefix = code[0]
+ if code_prefix == '1':
+ data['1xx'] += value
+ elif code_prefix == '2':
+ data['2xx'] += value
+ elif code_prefix == '3':
+ data['3xx'] += value
+ elif code_prefix == '4':
+ data['4xx'] += value
+ elif code_prefix == '5':
+ data['5xx'] += value
+ else:
+ data['other'] += value
+ self.data.update(data)
+
+ def get_data_per_code(self, raw_data):
+ for code, value in raw_data['total_status_code_count'].items():
+ if self.charts:
+ if code not in self.data:
+ self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
+ self.data[code] = value
+
+
+def fetch_data_(raw_data, metrics):
+ data = dict()
+
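+    # Example (illustrative): a dotted metric name like 'a.b' is resolved as raw_data['a']['b']
+    # and stored under the flattened key 'a_b'; plain names such as 'total_count' are copied as-is.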
+ for metric in metrics:
+ value = raw_data
+ metrics_list = metric.split('.')
+ try:
+ for m in metrics_list:
+ value = value[m]
+ except KeyError:
+ continue
+ data['_'.join(metrics_list)] = value
+
+ return data
diff --git a/collectors/python.d.plugin/traefik/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf
new file mode 100644
index 00000000..e3f182d3
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/traefik.conf
@@ -0,0 +1,77 @@
+# netdata python.d.plugin configuration for traefik health data API
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, the traefik plugin also supports the following:
+#
+# url: '<scheme>://<host>:<port>/<health_page_api>'
+# # http://localhost:8080/health
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ url: 'http://localhost:8080/health'
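+
+# An illustrative (commented out) example of a second, password-protected job;
+# the hostname and credentials below are placeholders, not defaults:
+#
+# remote:
+#   url: 'http://traefik.example.com:8080/health'
+#   user: 'username'
+#   pass: 'password'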
diff --git a/collectors/python.d.plugin/uwsgi/Makefile.inc b/collectors/python.d.plugin/uwsgi/Makefile.inc
new file mode 100644
index 00000000..75d96de0
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += uwsgi/uwsgi.chart.py
+dist_pythonconfig_DATA += uwsgi/uwsgi.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += uwsgi/README.md uwsgi/Makefile.inc
+
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
new file mode 120000
index 00000000..44b85594
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -0,0 +1 @@
+integrations/uwsgi.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md b/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md
new file mode 100644
index 00000000..af58608b
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md
@@ -0,0 +1,219 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/uwsgi/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/uwsgi/metadata.yaml"
+sidebar_label: "uWSGI"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# uWSGI
+
+
+<img src="https://netdata.cloud/img/uwsgi.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: uwsgi
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors uWSGI metrics about requests, workers, memory and more.
+
+It collects every metric exposed from the stats server of uWSGI, either from the `stats.socket` or from the web server's TCP/IP socket.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This collector will auto-detect uWSGI instances deployed on the local host, running on port 1717 or exposing stats on the socket `/tmp/stats.socket`.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per uWSGI instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| uwsgi.requests | a dimension per worker | requests/s |
+| uwsgi.tx | a dimension per worker | KiB/s |
+| uwsgi.avg_rt | a dimension per worker | milliseconds |
+| uwsgi.memory_rss | a dimension per worker | MiB |
+| uwsgi.memory_vsz | a dimension per worker | MiB |
+| uwsgi.exceptions | exceptions | exceptions |
+| uwsgi.harakiris | harakiris | harakiris |
+| uwsgi.respawns | respawns | respawns |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable the uWSGI Stats server
+
+Make sure that your uWSGI instance exposes its metrics via a Stats server.
+
+Source: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
+
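+For example, the Stats server can be enabled directly in the uWSGI configuration (an illustrative snippet; adapt the address or socket path to your deployment):
+
+```ini
+[uwsgi]
+# expose stats on a TCP port (the collector's default port is 1717) ...
+stats = 127.0.0.1:1717
+# ... or on a UNIX socket instead
+# stats = /tmp/stats.socket
+```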
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/uwsgi.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/uwsgi.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |
+| socket | The 'path/to/uwsgistats.sock' | no | no |
+| host | The host to connect to | no | no |
+| port | The port to connect to | no | no |
+
+</details>
+
+#### Examples
+
+##### Basic (default out-of-the-box)
+
+A basic example configuration; the auto-detection mechanism uses it by default. As all JOBs share the same name, only one of them can run at a time.
+
+<details><summary>Config</summary>
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 1717
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 1717
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+local:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+
+remote:
+ name : 'remote'
+ host : '192.0.2.1'
+ port : 1717
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `uwsgi` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin uwsgi debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/uwsgi/metadata.yaml b/collectors/python.d.plugin/uwsgi/metadata.yaml
new file mode 100644
index 00000000..cdb090ac
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/metadata.yaml
@@ -0,0 +1,201 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: uwsgi
+ monitored_instance:
+ name: uWSGI
+ link: "https://github.com/unbit/uwsgi/tree/2.0.21"
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: "uwsgi.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - application server
+ - python
+ - web applications
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors uWSGI metrics about requests, workers, memory and more."
+ method_description: "It collects every metric exposed from the stats server of uWSGI, either from the `stats.socket` or from the web server's TCP/IP socket."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+          description: "This collector will auto-detect uWSGI instances deployed on the local host, running on port 1717 or exposing stats on the socket `/tmp/stats.socket`."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable the uWSGI Stats server
+ description: |
+            Make sure that your uWSGI instance exposes its metrics via a Stats server.
+
+ Source: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
+ configuration:
+ file:
+ name: "python.d/uwsgi.conf"
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: The JOB's name as it will appear at the dashboard (by default is the job_name)
+ default_value: job_name
+ required: false
+ - name: socket
+ description: The 'path/to/uwsgistats.sock'
+ default_value: no
+ required: false
+ - name: host
+ description: The host to connect to
+ default_value: no
+ required: false
+ - name: port
+ description: The port to connect to
+ default_value: no
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic (default out-of-the-box)
+              description: A basic example configuration; the auto-detection mechanism uses it by default. As all JOBs share the same name, only one of them can run at a time.
+ config: |
+ socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+ localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+
+ localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 1717
+
+ localipv6:
+ name : 'local'
+ host : '::1'
+ port : 1717
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ local:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+
+ remote:
+ name : 'remote'
+ host : '192.0.2.1'
+ port : 1717
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: uwsgi.requests
+ description: Requests
+ unit: "requests/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per worker
+ - name: uwsgi.tx
+ description: Transmitted data
+ unit: "KiB/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per worker
+ - name: uwsgi.avg_rt
+ description: Average request time
+ unit: "milliseconds"
+ chart_type: line
+ dimensions:
+ - name: a dimension per worker
+ - name: uwsgi.memory_rss
+ description: RSS (Resident Set Size)
+ unit: "MiB"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per worker
+ - name: uwsgi.memory_vsz
+ description: VSZ (Virtual Memory Size)
+ unit: "MiB"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per worker
+ - name: uwsgi.exceptions
+ description: Exceptions
+ unit: "exceptions"
+ chart_type: line
+ dimensions:
+ - name: exceptions
+ - name: uwsgi.harakiris
+ description: Harakiris
+ unit: "harakiris"
+ chart_type: line
+ dimensions:
+ - name: harakiris
+ - name: uwsgi.respawns
+ description: Respawns
+ unit: "respawns"
+ chart_type: line
+ dimensions:
+ - name: respawns
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
new file mode 100644
index 00000000..e4d90000
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Description: uwsgi netdata python.d module
+# Author: Robbert Segeren (robbert-ef)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import json
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+
+ORDER = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+ 'exceptions',
+ 'harakiri',
+ 'respawn',
+]
+
+DYNAMIC_CHARTS = [
+ 'requests',
+ 'tx',
+ 'avg_rt',
+ 'memory_rss',
+ 'memory_vsz',
+]
+
+# NOTE: lines for the dynamic charts are created at runtime in the `_get_data()` method
+CHARTS = {
+ 'requests': {
+ 'options': [None, 'Requests', 'requests/s', 'requests', 'uwsgi.requests', 'stacked'],
+ 'lines': [
+ ['requests', 'requests', 'incremental']
+ ]
+ },
+ 'tx': {
+ 'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'],
+ 'lines': [
+ ['tx', 'tx', 'incremental']
+ ]
+ },
+ 'avg_rt': {
+ 'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'],
+ 'lines': [
+ ['avg_rt', 'avg_rt', 'absolute']
+ ]
+ },
+ 'memory_rss': {
+ 'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'],
+ 'lines': [
+ ['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'memory_vsz': {
+ 'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
+ 'lines': [
+ ['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20]
+ ]
+ },
+ 'exceptions': {
+ 'options': [None, 'Exceptions', 'exceptions', 'exceptions', 'uwsgi.exceptions', 'line'],
+ 'lines': [
+ ['exceptions', 'exceptions', 'incremental']
+ ]
+ },
+ 'harakiri': {
+ 'options': [None, 'Harakiris', 'harakiris', 'harakiris', 'uwsgi.harakiris', 'line'],
+ 'lines': [
+ ['harakiri_count', 'harakiris', 'incremental']
+ ]
+ },
+ 'respawn': {
+ 'options': [None, 'Respawns', 'respawns', 'respawns', 'uwsgi.respawns', 'line'],
+ 'lines': [
+ ['respawn_count', 'respawns', 'incremental']
+ ]
+ },
+}
+
+
+class Service(SocketService):
+ def __init__(self, configuration=None, name=None):
+ super(Service, self).__init__(configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = deepcopy(CHARTS)
+ self.url = self.configuration.get('host', 'localhost')
+ self.port = self.configuration.get('port', 1717)
+ # Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time
+ for chart in DYNAMIC_CHARTS:
+ self.definitions[chart]['lines'] = []
+ self.last_result = {}
+ self.workers = []
+
+ def read_data(self):
+ """
+ Read data from socket and parse as JSON.
+ :return: (dict) stats
+ """
+ raw_data = self._get_raw_data()
+ if not raw_data:
+ return None
+ try:
+ return json.loads(raw_data)
+ except ValueError as err:
+ self.error(err)
+ return None
+
+ def check(self):
+ """
+ Parse configuration and check if we can read data.
+ :return: boolean
+ """
+ self._parse_config()
+ return bool(self.read_data())
+
+ def add_worker_dimensions(self, key):
+ """
+ Helper to add dimensions for a worker.
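+        Example (illustrative): for a worker with pid 1234, dimensions such as
+        'requests_1234', 'tx_1234' and 'avg_rt_1234' are added to the dynamic charts.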
+ :param key: (int or str) worker identifier
+ :return:
+ """
+ for chart in DYNAMIC_CHARTS:
+ for line in CHARTS[chart]['lines']:
+ dimension_id = '{}_{}'.format(line[0], key)
+ dimension_name = str(key)
+
+ dimension = [dimension_id, dimension_name] + line[2:]
+ self.charts[chart].add_dimension(dimension)
+
+ @staticmethod
+ def _check_raw_data(data):
+ # The server will close the connection when it's done sending
+ # data, so just keep looping until that happens.
+ return False
+
+ def _get_data(self):
+ """
+ Read data from socket
+ :return: dict
+ """
+ stats = self.read_data()
+ if not stats:
+ return None
+
+ result = {
+ 'exceptions': 0,
+ 'harakiri_count': 0,
+ 'respawn_count': 0,
+ }
+
+ for worker in stats['workers']:
+ key = worker['pid']
+
+ # Add dimensions for new workers
+ if key not in self.workers:
+ self.add_worker_dimensions(key)
+ self.workers.append(key)
+
+ result['requests_{}'.format(key)] = worker['requests']
+ result['tx_{}'.format(key)] = worker['tx']
+ result['avg_rt_{}'.format(key)] = worker['avg_rt']
+
+ # avg_rt is not reset by uwsgi, so reset here
+ if self.last_result.get('requests_{}'.format(key)) == worker['requests']:
+ result['avg_rt_{}'.format(key)] = 0
+
+ result['memory_rss_{}'.format(key)] = worker['rss']
+ result['memory_vsz_{}'.format(key)] = worker['vsz']
+
+ result['exceptions'] += worker['exceptions']
+ result['harakiri_count'] += worker['harakiri_count']
+ result['respawn_count'] += worker['respawn_count']
+
+ self.last_result = result
+ return result
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf
new file mode 100644
index 00000000..7d09e733
--- /dev/null
+++ b/collectors/python.d.plugin/uwsgi/uwsgi.conf
@@ -0,0 +1,92 @@
+# netdata python.d.plugin configuration for uwsgi
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, uwsgi also supports the following:
+#
+# socket: 'path/to/uwsgistats.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+
+socket:
+ name : 'local'
+ socket : '/tmp/stats.socket'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 1717
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 1717
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 1717
diff --git a/collectors/python.d.plugin/varnish/Makefile.inc b/collectors/python.d.plugin/varnish/Makefile.inc
new file mode 100644
index 00000000..2469b059
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += varnish/varnish.chart.py
+dist_pythonconfig_DATA += varnish/varnish.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += varnish/README.md varnish/Makefile.inc
+
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
new file mode 120000
index 00000000..194be233
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -0,0 +1 @@
+integrations/varnish.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/varnish/integrations/varnish.md b/collectors/python.d.plugin/varnish/integrations/varnish.md
new file mode 100644
index 00000000..da74dcf8
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/integrations/varnish.md
@@ -0,0 +1,213 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/metadata.yaml"
+sidebar_label: "Varnish"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Varnish
+
+
+<img src="https://netdata.cloud/img/varnish.svg" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: varnish
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Varnish metrics: global HTTP accelerator statistics, Backends (VBE) and Storages (SMF, SMA, MSE).
+
+Note that both Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.
+
+
+It uses the `varnishstat` tool in order to collect the metrics.
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+The `netdata` user must be a member of the `varnish` group.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.
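+
+A quick way to confirm that the `netdata` user has the required permissions is to run `varnishstat` as that user (an illustrative check, not part of the collector itself):
+
+```bash
+sudo -u netdata varnishstat -1 | head
+```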
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Varnish instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| varnish.session_connection | accepted, dropped | connections/s |
+| varnish.client_requests | received | requests/s |
+| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |
+| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |
+| varnish.cached_objects_expired | objects | expired/s |
+| varnish.cached_objects_nuked | objects | nuked/s |
+| varnish.threads_total | None | number |
+| varnish.threads_statistics | created, failed, limited | threads/s |
+| varnish.threads_queue_len | in queue | requests |
+| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |
+| varnish.backend_requests | sent | requests/s |
+| varnish.esi_statistics | errors, warnings | problems/s |
+| varnish.memory_usage | free, allocated | MiB |
+| varnish.uptime | uptime | seconds |
+
+### Per Backend
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| varnish.backend | header, body | kilobits/s |
+
+### Per Storage
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| varnish.storage_usage | free, allocated | KiB |
+| varnish.storage_alloc_objs | allocated | objects |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Provide the necessary permissions
+
+In order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:
+
+```
+usermod -aG varnish netdata
+```
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/varnish.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/varnish.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |
+| update_every | Sets the default data collection frequency. | 10 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+```yaml
+job_name:
+ instance_name: '<name-of-varnishd-instance>'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin varnish debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/varnish/metadata.yaml b/collectors/python.d.plugin/varnish/metadata.yaml
new file mode 100644
index 00000000..d31c1cf6
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/metadata.yaml
@@ -0,0 +1,253 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: varnish
+ monitored_instance:
+ name: Varnish
+ link: https://varnish-cache.org/
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: 'varnish.svg'
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ''
+ keywords:
+ - varnish
+ - varnishstat
+ - varnishd
+ - cache
+ - web server
+ - web cache
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+          This collector monitors Varnish metrics: global HTTP accelerator statistics, Backends (VBE) and Storages (SMF, SMA, MSE).
+
+          Note that both Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.
+ method_description: |
+ It uses the `varnishstat` tool in order to collect the metrics.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: |
+          The `netdata` user must be a member of the `varnish` group.
+ default_behavior:
+ auto_detection:
+ description: By default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.
+ limits:
+ description: ''
+ performance_impact:
+ description: ''
+ setup:
+ prerequisites:
+ list:
+ - title: Provide the necessary permissions
+ description: |
+ In order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:
+
+ ```
+ usermod -aG varnish netdata
+ ```
+ configuration:
+ file:
+ name: python.d/varnish.conf
+ description: ''
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: instance_name
+ description: the name of the varnishd instance to get logs from. If not specified, the local host name is used.
+ default_value: ""
+ required: true
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 10
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ''
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: 'Config'
+ list:
+ - name: Basic
+ description: An example configuration.
+ folding:
+ enabled: false
+ config: |
+ job_name:
+ instance_name: '<name-of-varnishd-instance>'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: varnish.session_connection
+ description: Connections Statistics
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: accepted
+ - name: dropped
+ - name: varnish.client_requests
+ description: Client Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: varnish.all_time_hit_rate
+ description: All History Hit Rate Ratio
+ unit: "percentage"
+ chart_type: stacked
+ dimensions:
+ - name: hit
+ - name: miss
+ - name: hitpass
+ - name: varnish.current_poll_hit_rate
+ description: Current Poll Hit Rate Ratio
+ unit: "percentage"
+ chart_type: stacked
+ dimensions:
+ - name: hit
+ - name: miss
+ - name: hitpass
+ - name: varnish.cached_objects_expired
+ description: Expired Objects
+ unit: "expired/s"
+ chart_type: line
+ dimensions:
+ - name: objects
+ - name: varnish.cached_objects_nuked
+ description: Least Recently Used Nuked Objects
+ unit: "nuked/s"
+ chart_type: line
+ dimensions:
+ - name: objects
+ - name: varnish.threads_total
+ description: Number Of Threads In All Pools
+ unit: "number"
+ chart_type: line
+ dimensions:
+ - name: None
+ - name: varnish.threads_statistics
+ description: Threads Statistics
+ unit: "threads/s"
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: failed
+ - name: limited
+ - name: varnish.threads_queue_len
+ description: Current Queue Length
+ unit: "requests"
+ chart_type: line
+ dimensions:
+ - name: in queue
+ - name: varnish.backend_connections
+ description: Backend Connections Statistics
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: successful
+ - name: unhealthy
+ - name: reused
+ - name: closed
+ - name: recycled
+ - name: failed
+ - name: varnish.backend_requests
+ description: Requests To The Backend
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: sent
+ - name: varnish.esi_statistics
+ description: ESI Statistics
+ unit: "problems/s"
+ chart_type: line
+ dimensions:
+ - name: errors
+ - name: warnings
+ - name: varnish.memory_usage
+ description: Memory Usage
+ unit: "MiB"
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: allocated
+ - name: varnish.uptime
+ description: Uptime
+ unit: "seconds"
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: Backend
+ description: ""
+ labels: []
+ metrics:
+ - name: varnish.backend
+ description: Backend {backend_name}
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: header
+ - name: body
+ - name: Storage
+ description: ""
+ labels: []
+ metrics:
+ - name: varnish.storage_usage
+ description: Storage {storage_name} Usage
+ unit: "KiB"
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: allocated
+ - name: varnish.storage_alloc_objs
+ description: Storage {storage_name} Allocated Objects
+ unit: "objects"
+ chart_type: line
+ dimensions:
+ - name: allocated
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
new file mode 100644
index 00000000..506ad026
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -0,0 +1,385 @@
+# -*- coding: utf-8 -*-
+# Description: varnish netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+ORDER = [
+ 'session_connections',
+ 'client_requests',
+ 'all_time_hit_rate',
+ 'current_poll_hit_rate',
+ 'cached_objects_expired',
+ 'cached_objects_nuked',
+ 'threads_total',
+ 'threads_statistics',
+ 'threads_queue_len',
+ 'backend_connections',
+ 'backend_requests',
+ 'esi_statistics',
+ 'memory_usage',
+ 'uptime'
+]
+
+CHARTS = {
+ 'session_connections': {
+ 'options': [None, 'Connections Statistics', 'connections/s',
+ 'client metrics', 'varnish.session_connection', 'line'],
+ 'lines': [
+ ['sess_conn', 'accepted', 'incremental'],
+ ['sess_dropped', 'dropped', 'incremental']
+ ]
+ },
+ 'client_requests': {
+ 'options': [None, 'Client Requests', 'requests/s',
+ 'client metrics', 'varnish.client_requests', 'line'],
+ 'lines': [
+ ['client_req', 'received', 'incremental']
+ ]
+ },
+ 'all_time_hit_rate': {
+ 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
+ 'varnish.all_time_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-absolute-row'],
+ ['cache_miss', 'miss', 'percentage-of-absolute-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
+ },
+ 'current_poll_hit_rate': {
+ 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
+ 'varnish.current_poll_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-incremental-row'],
+ ['cache_miss', 'miss', 'percentage-of-incremental-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row']
+ ]
+ },
+ 'cached_objects_expired': {
+ 'options': [None, 'Expired Objects', 'expired/s', 'cache performance',
+ 'varnish.cached_objects_expired', 'line'],
+ 'lines': [
+ ['n_expired', 'objects', 'incremental']
+ ]
+ },
+ 'cached_objects_nuked': {
+ 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance',
+ 'varnish.cached_objects_nuked', 'line'],
+ 'lines': [
+ ['n_lru_nuked', 'objects', 'incremental']
+ ]
+ },
+ 'threads_total': {
+ 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics',
+ 'varnish.threads_total', 'line'],
+ 'lines': [
+ ['threads', None, 'absolute']
+ ]
+ },
+ 'threads_statistics': {
+ 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics',
+ 'varnish.threads_statistics', 'line'],
+ 'lines': [
+ ['threads_created', 'created', 'incremental'],
+ ['threads_failed', 'failed', 'incremental'],
+ ['threads_limited', 'limited', 'incremental']
+ ]
+ },
+ 'threads_queue_len': {
+ 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics',
+ 'varnish.threads_queue_len', 'line'],
+ 'lines': [
+ ['thread_queue_len', 'in queue']
+ ]
+ },
+ 'backend_connections': {
+ 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics',
+ 'varnish.backend_connections', 'line'],
+ 'lines': [
+ ['backend_conn', 'successful', 'incremental'],
+ ['backend_unhealthy', 'unhealthy', 'incremental'],
+ ['backend_reuse', 'reused', 'incremental'],
+ ['backend_toolate', 'closed', 'incremental'],
+ ['backend_recycle', 'recycled', 'incremental'],
+ ['backend_fail', 'failed', 'incremental']
+ ]
+ },
+ 'backend_requests': {
+ 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
+ 'varnish.backend_requests', 'line'],
+ 'lines': [
+ ['backend_req', 'sent', 'incremental']
+ ]
+ },
+ 'esi_statistics': {
+ 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
+ 'lines': [
+ ['esi_errors', 'errors', 'incremental'],
+ ['esi_warnings', 'warnings', 'incremental']
+ ]
+ },
+ 'memory_usage': {
+ 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
+ 'lines': [
+ ['memory_free', 'free', 'absolute', 1, 1 << 20],
+ ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
+ },
+ 'uptime': {
+ 'lines': [
+ ['uptime', None, 'absolute']
+ ],
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
+ }
+}
+
+
+def backend_charts_template(name):
+ order = [
+ '{0}_response_statistics'.format(name),
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s', 'backend response statistics',
+ 'varnish.backend', 'area'],
+ 'lines': [
+ ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
+ ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000]
+ ]
+ },
+ }
+
+ return order, charts
+
+
+def storage_charts_template(name):
+ order = [
+ 'storage_{0}_usage'.format(name),
+ 'storage_{0}_alloc_objs'.format(name)
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Storage "{0}" Usage'.format(name), 'KiB', 'storage usage', 'varnish.storage_usage', 'stacked'],
+ 'lines': [
+ ['{0}.g_space'.format(name), 'free', 'absolute', 1, 1 << 10],
+ ['{0}.g_bytes'.format(name), 'allocated', 'absolute', 1, 1 << 10]
+ ]
+ },
+ order[1]: {
+ 'options': [None, 'Storage "{0}" Allocated Objects'.format(name), 'objects', 'storage usage', 'varnish.storage_alloc_objs', 'line'],
+ 'lines': [
+ ['{0}.g_alloc'.format(name), 'allocated', 'absolute']
+ ]
+ }
+ }
+
+ return order, charts
+
+
+VARNISHSTAT = 'varnishstat'
+
+re_version = re.compile(r'varnish-(?:plus-)?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
+
+
+class VarnishVersion:
+ def __init__(self, major, minor, patch):
+ self.major = major
+ self.minor = minor
+ self.patch = patch
+
+ def __str__(self):
+ return '{0}.{1}.{2}'.format(self.major, self.minor, self.patch)
+
+
+class Parser:
+ _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
+ _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_-]+).(beresp[\w_]+)\s+(\d+)')
+ _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
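+    # Illustrative examples of lines each pattern is meant to match (values are made up):
+    #   _backend_new: VBE.default(127.0.0.1,,8080).beresp_hdrbytes 1024
+    #   _backend_old: VBE.boot.default.beresp_hdrbytes 1024
+    #   _default:     MAIN.cache_hit 12345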
+
+ def __init__(self):
+ self.re_default = None
+ self.re_backend = None
+
+ def init(self, data):
+ data = ''.join(data)
+ parsed_main = Parser._default.findall(data)
+ if parsed_main:
+ self.re_default = Parser._default
+
+ parsed_backend = Parser._backend_new.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_new
+ else:
+ parsed_backend = Parser._backend_old.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_old
+
+ def server_stats(self, data):
+ return self.re_default.findall(''.join(data))
+
+ def backend_stats(self, data):
+ return self.re_backend.findall(''.join(data))
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.instance_name = configuration.get('instance_name')
+ self.parser = Parser()
+ self.command = None
+ self.collected_vbe = set()
+ self.collected_storages = set()
+
+ def create_command(self):
+ varnishstat = find_binary(VARNISHSTAT)
+
+ if not varnishstat:
+ self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
+ return False
+
+ command = [varnishstat, '-V']
+ reply = self._get_raw_data(stderr=True, command=command)
+        if not reply:
+            self.error(
+                "no output from '{0}'. Is varnish running? Not enough privileges?".format(' '.join(command)))
+ return False
+
+ ver = parse_varnish_version(reply)
+ if not ver:
+ self.error("failed to parse reply from '{0}', used regex :'{1}', reply : {2}".format(
+ ' '.join(command), re_version.pattern, reply))
+ return False
+
+ if self.instance_name:
+ self.command = [varnishstat, '-1', '-n', self.instance_name]
+ else:
+ self.command = [varnishstat, '-1']
+
+ if ver.major > 4:
+ self.command.extend(['-t', '1'])
+
+ self.info("varnish version: {0}, will use command: '{1}'".format(ver, ' '.join(self.command)))
+
+ return True
+
+ def check(self):
+ if not self.create_command():
+ return False
+
+ # STDOUT is not empty
+ reply = self._get_raw_data()
+ if not reply:
+ self.error("no output from '{0}'. Is it running? Not enough privileges?".format(' '.join(self.command)))
+ return False
+
+ self.parser.init(reply)
+
+ # Output is parsable
+ if not self.parser.re_default:
+            self.error("can't parse the output...")
+ return False
+
+ return True
+
+ def get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ data = dict()
+ server_stats = self.parser.server_stats(raw)
+ if not server_stats:
+ return None
+
+ stats = dict((param, value) for _, param, value in server_stats)
+ data.update(stats)
+
+ self.get_vbe_backends(data, raw)
+ self.get_storages(server_stats)
+
+ # varnish 5 uses default.g_bytes and default.g_space
+ data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
+ data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
+
+ return data
+
+ def get_vbe_backends(self, data, raw):
+ if not self.parser.re_backend:
+ return
+ stats = self.parser.backend_stats(raw)
+ if not stats:
+ return
+
+ for (name, param, value) in stats:
+ data['_'.join([name, param])] = value
+ if name in self.collected_vbe:
+ continue
+ self.collected_vbe.add(name)
+ self.add_backend_charts(name)
+
+ def get_storages(self, server_stats):
+ # Storage types:
+ # - SMF: File Storage
+ # - SMA: Malloc Storage
+ # - MSE: Massive Storage Engine (Varnish-Plus only)
+ #
+ # Stats example:
+ # [('SMF.', 'ssdStorage.c_req', '47686'),
+ # ('SMF.', 'ssdStorage.c_fail', '0'),
+ # ('SMF.', 'ssdStorage.c_bytes', '668102656'),
+ # ('SMF.', 'ssdStorage.c_freed', '140980224'),
+ # ('SMF.', 'ssdStorage.g_alloc', '39753'),
+ # ('SMF.', 'ssdStorage.g_bytes', '527122432'),
+ # ('SMF.', 'ssdStorage.g_space', '53159968768'),
+ # ('SMF.', 'ssdStorage.g_smf', '40130'),
+ # ('SMF.', 'ssdStorage.g_smf_frag', '311'),
+ # ('SMF.', 'ssdStorage.g_smf_large', '66')]
+ storages = [name for typ, name, _ in server_stats if typ.startswith(('SMF', 'SMA', 'MSE')) and name.endswith('g_space')]
+ if not storages:
+ return
+ for storage in storages:
+ storage = storage.split('.')[0]
+ if storage in self.collected_storages:
+ continue
+ self.collected_storages.add(storage)
+ self.add_storage_charts(storage)
+
+ def add_backend_charts(self, backend_name):
+ self.add_charts(backend_name, backend_charts_template)
+
+ def add_storage_charts(self, storage_name):
+ self.add_charts(storage_name, storage_charts_template)
+
+ def add_charts(self, name, charts_template):
+ order, charts = charts_template(name)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
+
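+# The first line of 'varnishstat -V' output typically looks like
+# "varnishstat (varnish-6.0.2 revision ...)"; re_version extracts major.minor.patch from it.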
+def parse_varnish_version(lines):
+ m = re_version.search(lines[0])
+ if not m:
+ return None
+
+ m = m.groupdict()
+ return VarnishVersion(
+ int(m['major']),
+ int(m['minor']),
+ int(m['patch']),
+ )
diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
new file mode 100644
index 00000000..54bfe4de
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for varnish
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, varnish also supports the following:
+#
+# instance_name: 'name' # the name of the varnishd instance to get stats from. If not specified, the host name is used.
+#
+# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/w1sensor/Makefile.inc b/collectors/python.d.plugin/w1sensor/Makefile.inc
new file mode 100644
index 00000000..bddf146f
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += w1sensor/w1sensor.chart.py
+dist_pythonconfig_DATA += w1sensor/w1sensor.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += w1sensor/README.md w1sensor/Makefile.inc
+
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
new file mode 120000
index 00000000..c0fa9cd1
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -0,0 +1 @@
+integrations/1-wire_sensors.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md b/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
new file mode 100644
index 00000000..fe3c05ba
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
@@ -0,0 +1,167 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/w1sensor/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/w1sensor/metadata.yaml"
+sidebar_label: "1-Wire Sensors"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# 1-Wire Sensors
+
+
+<img src="https://netdata.cloud/img/1-wire.png" width="150"/>
+
+
+Plugin: python.d.plugin
+Module: w1sensor
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.
+
+The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected.
+
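+As a rough, illustrative sketch (not part of the collector's code), each detected sensor exposes a `w1_slave` file under `/sys/bus/w1/devices/` whose reading line ends in `t=<millidegrees>`. The device id below is hypothetical:
+
+```python
+import re
+
+# Hypothetical device id; real ids look like 28-00000022276e (family 28 = DS18B20)
+W1_SLAVE = '/sys/bus/w1/devices/28-00000022276e/w1_slave'
+
+with open(W1_SLAVE) as f:
+    for line in f:
+        match = re.search(r' t=(-?\d+)', line)  # same pattern the collector uses
+        if match:
+            # t= reports millidegrees Celsius; convert to degrees with one decimal
+            print(round(int(match.group(1)) / 1000.0, 1))
+```
+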
+This collector is only supported on the following platforms:
+
+- Linux
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector will try to auto detect available 1-Wire devices.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per 1-Wire Sensors instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| w1sensor.temp | a dimension per sensor | Celsius |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Required Linux kernel modules
+
+Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/w1sensor.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/w1sensor.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |
+
+</details>
+
+#### Examples
+
+##### Provide human readable names
+
+Associate two 1-Wire identifiers with human readable names.
+
+```yaml
+sensors:
+ name_00000022276e: 'Machine room'
+ name_00000022298f: 'Rack 12'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin w1sensor debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/w1sensor/metadata.yaml b/collectors/python.d.plugin/w1sensor/metadata.yaml
new file mode 100644
index 00000000..7b076823
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/metadata.yaml
@@ -0,0 +1,119 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: w1sensor
+ monitored_instance:
+ name: 1-Wire Sensors
+ link: "https://www.analog.com/en/product-category/1wire-temperature-sensors.html"
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ icon_filename: "1-wire.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - temperature
+ - sensor
+ - 1-wire
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts."
+ method_description: "The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "The collector will try to auto detect available 1-Wire devices."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "Required Linux kernel modules"
+ description: "Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded."
+ configuration:
+ file:
+ name: python.d/w1sensor.conf
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: name_<1-Wire id>
+ description: This allows associating a human readable name with a sensor's 1-Wire identifier.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Provide human readable names
+ description: Associate two 1-Wire identifiers with human readable names.
+ config: |
+ sensors:
+ name_00000022276e: 'Machine room'
+ name_00000022298f: 'Rack 12'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: w1sensor.temp
+ description: 1-Wire Temperature Sensor
+ unit: "Celsius"
+ chart_type: line
+ dimensions:
+ - name: a dimension per sensor
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
new file mode 100644
index 00000000..66797ced
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# Description: 1-wire temperature monitor netdata python.d module
+# Author: Diomidis Spinellis <http://www.spinellis.gr>
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import re
+
+from bases.FrameworkServices.SimpleService import SimpleService
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+
+# Location where 1-Wire devices can be found
+W1_DIR = '/sys/bus/w1/devices/'
+
+# Lines matching the following regular expression contain a temperature value
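+# e.g. the second line of a w1_slave file: "4b 01 4b 46 7f ff 05 10 d8 t=20687",
+# where t= is the temperature in millidegrees Celsius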
+RE_TEMP = re.compile(r' t=(-?\d+)')
+
+ORDER = [
+ 'temp',
+]
+
+CHARTS = {
+ 'temp': {
+ 'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'],
+ 'lines': []
+ }
+}
+
+# Known and supported family members
+# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c
+THERM_FAMILY = {
+ '10': 'W1_THERM_DS18S20',
+ '22': 'W1_THERM_DS1822',
+ '28': 'W1_THERM_DS18B20',
+ '3b': 'W1_THERM_DS1825',
+ '42': 'W1_THERM_DS28EA00',
+}
+
+
+class Service(SimpleService):
+ """Provide netdata service for 1-Wire sensors"""
+
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.probes = []
+
+ def check(self):
+ """Auto-detect available 1-Wire sensors, setting line definitions
+ and probes to be monitored."""
+ try:
+ file_names = os.listdir(W1_DIR)
+ except OSError as err:
+ self.error(err)
+ return False
+
+ lines = []
+ for file_name in file_names:
+ if file_name[2] != '-':
+ continue
+ if file_name[0:2] not in THERM_FAMILY:
+ continue
+
+ self.probes.append(file_name)
+ identifier = file_name[3:]
+ name = identifier
+ config_name = self.configuration.get('name_' + identifier)
+ if config_name:
+ name = config_name
+ lines.append(['w1sensor_temp_' + identifier, name, 'absolute',
+ 1, 10])
+ self.definitions['temp']['lines'] = lines
+ return len(self.probes) > 0
+
+ def get_data(self):
+ """Return data read from sensors."""
+ data = dict()
+
+ for file_name in self.probes:
+ file_path = W1_DIR + file_name + '/w1_slave'
+ identifier = file_name[3:]
+ try:
+ with open(file_path, 'r') as device_file:
+ for line in device_file:
+ matched = RE_TEMP.search(line)
+ if matched:
+ # Round to one decimal digit to filter out noise
+ value = round(int(matched.group(1)) / 1000., 1)
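+ # Store as an integer scaled by 10; the chart dimension divisor of 10
+ # (set in check()) turns it back into a one-decimal Celsius value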
+ value = int(value * 10)
+ data['w1sensor_temp_' + identifier] = value
+ except (OSError, IOError) as err:
+ self.error(err)
+ continue
+ return data or None
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
new file mode 100644
index 00000000..b60d2865
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for w1sensor
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 5 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, w1sensor also supports the following:
+#
+# name_<1-Wire id>: '<human readable name>'
+# This allows associating a human readable name with a sensor's 1-Wire
+# identifier. Example:
+# name_00000022276e: 'Machine room'
+# name_00000022298f: 'Rack 12'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/zscores/Makefile.inc b/collectors/python.d.plugin/zscores/Makefile.inc
new file mode 100644
index 00000000..d8b18241
--- /dev/null
+++ b/collectors/python.d.plugin/zscores/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += zscores/zscores.chart.py
+dist_pythonconfig_DATA += zscores/zscores.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += zscores/README.md zscores/Makefile.inc
diff --git a/collectors/python.d.plugin/zscores/README.md b/collectors/python.d.plugin/zscores/README.md
new file mode 120000
index 00000000..159ce078
--- /dev/null
+++ b/collectors/python.d.plugin/zscores/README.md
@@ -0,0 +1 @@
+integrations/python.d_zscores.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md b/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
new file mode 100644
index 00000000..9d7d1c3d
--- /dev/null
+++ b/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
@@ -0,0 +1,195 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/zscores/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/zscores/metadata.yaml"
+sidebar_label: "python.d zscores"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Other"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# python.d zscores
+
+Plugin: python.d.plugin
+Module: zscores
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.
+
+
+This collector uses the [Netdata REST API](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev`
+for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).
+
+For each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over
+time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.
+
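+Below is a minimal, self-contained sketch of that calculation for a single dimension (it is not the collector's implementation, which additionally scales scores by 100 and can aggregate them per chart):
+
+```python
+import numpy as np
+
+def smoothed_zscore(values, mean, std, z_clip=10, z_smooth_n=15, z_abs=True):
+    """Clipped, optionally absolute, smoothed z-score of the most recent values."""
+    z = (np.asarray(values, dtype=float) - mean) / std
+    z = np.clip(z, -z_clip, z_clip)          # cap extreme scores for stability
+    if z_abs:
+        z = np.abs(z)
+    return float(np.mean(z[-z_smooth_n:]))   # smooth by averaging the last n scores
+
+# e.g. smoothed_zscore([5.1, 5.2, 9.8], mean=5.0, std=0.2) yields a large score
+```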
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per python.d zscores instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| zscores.z | a dimension per chart or dimension | z |
+| zscores.3stddev | a dimension per chart or dimension | count |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Python Requirements
+
+This collector will only work with Python 3 and requires the following packages to be installed.
+
+```bash
+# become netdata user
+sudo su -s /bin/bash netdata
+# install required packages
+pip3 install numpy pandas requests netdata-pandas==0.0.38
+```
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `python.d/zscores.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config python.d/zscores.conf
+```
+#### Options
+
+There are 2 sections:
+
+* Global variables
+* One or more JOBS that can define multiple different instances to monitor.
+
+The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| charts_regex | what charts to pull data for - A regex like `system\..*\|` or `system\..*\|apps.cpu\|apps.mem` etc. | system\..* | yes |
+| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |
+| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |
+| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |
+| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |
+| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |
+| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes |
+| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |
+| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |
+| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+
+</details>
+
+#### Examples
+
+##### Default
+
+Default configuration.
+
+```yaml
+local:
+ name: 'local'
+ host: '127.0.0.1:19999'
+ charts_regex: 'system\..*'
+ charts_to_exclude: 'system.uptime'
+ train_secs: 14400
+ offset_secs: 300
+ train_every_n: 900
+ z_smooth_n: 15
+ z_clip: 10
+ z_abs: 'true'
+ burn_in: 2
+ mode: 'per_chart'
+ per_chart_agg: 'mean'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `python.d.plugin` to debug the collector:
+
+ ```bash
+ ./python.d.plugin zscores debug trace
+ ```
+
+
diff --git a/collectors/python.d.plugin/zscores/metadata.yaml b/collectors/python.d.plugin/zscores/metadata.yaml
new file mode 100644
index 00000000..388e9b46
--- /dev/null
+++ b/collectors/python.d.plugin/zscores/metadata.yaml
@@ -0,0 +1,187 @@
+plugin_name: python.d.plugin
+modules:
+ - meta:
+ plugin_name: python.d.plugin
+ module_name: zscores
+ monitored_instance:
+ name: python.d zscores
+ link: https://en.wikipedia.org/wiki/Standard_score
+ categories:
+ - data-collection.other
+ icon_filename: ""
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - zscore
+ - z-score
+ - standard score
+ - standard deviation
+ - anomaly detection
+ - statistical anomaly detection
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.
+ method_description: |
+ This collector uses the [Netdata REST API](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev`
+ for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).
+
+ For each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over
+ time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Python Requirements
+ description: |
+ This collector will only work with Python 3 and requires the following packages to be installed.
+
+ ```bash
+ # become netdata user
+ sudo su -s /bin/bash netdata
+ # install required packages
+ pip3 install numpy pandas requests netdata-pandas==0.0.38
+ ```
+ configuration:
+ file:
+ name: python.d/zscores.conf
+ description: ""
+ options:
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: charts_regex
+ description: what charts to pull data for - A regex like `system\..*|` or `system\..*|apps.cpu|apps.mem` etc.
+ default_value: "system\\..*"
+ required: true
+ - name: train_secs
+ description: length of time (in seconds) to base calculations off for mean and stddev.
+ default_value: 14400
+ required: true
+ - name: offset_secs
+ description: offset (in seconds) preceding latest data to ignore when calculating mean and stddev.
+ default_value: 300
+ required: true
+ - name: train_every_n
+ description: recalculate the mean and stddev every n steps of the collector.
+ default_value: 900
+ required: true
+ - name: z_smooth_n
+ description: smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values.
+ default_value: 15
+ required: true
+ - name: z_clip
+ description: cap absolute value of zscore (before smoothing) for better stability.
+ default_value: 10
+ required: true
+ - name: z_abs
+ description: "set z_abs: 'true' to make all zscores be absolute values only."
+ default_value: "true"
+ required: true
+ - name: burn_in
+ description: burn in period in which to initially calculate mean and stddev on every step.
+ default_value: 2
+ required: true
+ - name: mode
+ description: mode can be to get a zscore 'per_dim' or 'per_chart'.
+ default_value: per_chart
+ required: true
+ - name: per_chart_agg
+ description: per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'.
+ default_value: mean
+ required: true
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Default
+ description: Default configuration.
+ folding:
+ enabled: false
+ config: |
+ local:
+ name: 'local'
+ host: '127.0.0.1:19999'
+ charts_regex: 'system\..*'
+ charts_to_exclude: 'system.uptime'
+ train_secs: 14400
+ offset_secs: 300
+ train_every_n: 900
+ z_smooth_n: 15
+ z_clip: 10
+ z_abs: 'true'
+ burn_in: 2
+ mode: 'per_chart'
+ per_chart_agg: 'mean'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: zscores.z
+ description: Z Score
+ unit: "z"
+ chart_type: line
+ dimensions:
+ - name: a dimension per chart or dimension
+ - name: zscores.3stddev
+ description: Z Score >3
+ unit: "count"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per chart or dimension
diff --git a/collectors/python.d.plugin/zscores/zscores.chart.py b/collectors/python.d.plugin/zscores/zscores.chart.py
new file mode 100644
index 00000000..1099b937
--- /dev/null
+++ b/collectors/python.d.plugin/zscores/zscores.chart.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+# Description: zscores netdata python.d module
+# Author: andrewm4894
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from datetime import datetime
+import re
+
+import requests
+import numpy as np
+import pandas as pd
+
+from bases.FrameworkServices.SimpleService import SimpleService
+from netdata_pandas.data import get_data, get_allmetrics
+
+priority = 60000
+update_every = 5
+disabled_by_default = True
+
+ORDER = [
+ 'z',
+ '3stddev'
+]
+
+CHARTS = {
+ 'z': {
+ 'options': ['z', 'Z Score', 'z', 'Z Score', 'zscores.z', 'line'],
+ 'lines': []
+ },
+ '3stddev': {
+ 'options': ['3stddev', 'Z Score >3', 'count', '3 Stddev', 'zscores.3stddev', 'stacked'],
+ 'lines': []
+ },
+}
+
+
+class Service(SimpleService):
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.host = self.configuration.get('host', '127.0.0.1:19999')
+ self.charts_regex = re.compile(self.configuration.get('charts_regex', 'system.*'))
+ self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
+ self.charts_in_scope = [
+ c for c in
+ list(filter(self.charts_regex.match,
+ requests.get(f'http://{self.host}/api/v1/charts').json()['charts'].keys()))
+ if c not in self.charts_to_exclude
+ ]
+ self.train_secs = self.configuration.get('train_secs', 14400)
+ self.offset_secs = self.configuration.get('offset_secs', 300)
+ self.train_every_n = self.configuration.get('train_every_n', 900)
+ self.z_smooth_n = self.configuration.get('z_smooth_n', 15)
+ self.z_clip = self.configuration.get('z_clip', 10)
+ self.z_abs = bool(self.configuration.get('z_abs', True))
+ self.burn_in = self.configuration.get('burn_in', 2)
+ self.mode = self.configuration.get('mode', 'per_chart')
+ self.per_chart_agg = self.configuration.get('per_chart_agg', 'mean')
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.collected_dims = {'z': set(), '3stddev': set()}
+ self.df_mean = pd.DataFrame()
+ self.df_std = pd.DataFrame()
+ self.df_z_history = pd.DataFrame()
+
+ def check(self):
+ _ = get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.')
+ return True
+
+ def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
+ """If dimension not in chart then add it.
+ """
+ for dim in data:
+ if dim not in self.collected_dims[chart]:
+ self.collected_dims[chart].add(dim)
+ self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
+
+ for dim in list(self.collected_dims[chart]):
+ if dim not in data:
+ self.collected_dims[chart].remove(dim)
+ self.charts[chart].del_dimension(dim, hide=False)
+
+ def train_model(self):
+ """Calculate the mean and stddev for all relevant metrics and store them for use in calulcating zscore at each timestep.
+ """
+ before = int(datetime.now().timestamp()) - self.offset_secs
+ after = before - self.train_secs
+
+ self.df_mean = get_data(
+ self.host, self.charts_in_scope, after, before, points=10, group='average', col_sep='.'
+ ).mean().to_frame().rename(columns={0: "mean"})
+
+ self.df_std = get_data(
+ self.host, self.charts_in_scope, after, before, points=10, group='stddev', col_sep='.'
+ ).mean().to_frame().rename(columns={0: "std"})
+
+ def create_data(self, df_allmetrics):
+ """Use x, mean, stddev to generate z scores and 3stddev flags via some pandas manipulation.
+ Returning two dictionaries of dimensions and measures, one for each chart.
+
+ :param df_allmetrics <pd.DataFrame>: pandas dataframe with latest data from api/v1/allmetrics.
+ :return: (<dict>,<dict>) tuple of dictionaries, one for zscores and the other for a flag if abs(z)>3.
+ """
+ # calculate clipped z score for each available metric
+ df_z = pd.concat([self.df_mean, self.df_std, df_allmetrics], axis=1, join='inner')
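+ # z is stored multiplied by 100 because netdata dimensions are integers;
+ # the 'z' chart uses divisor=100 (see validate_charts) to display the real score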
+ df_z['z'] = ((df_z['value'] - df_z['mean']) / df_z['std']).clip(-self.z_clip, self.z_clip).fillna(0) * 100
+ if self.z_abs:
+ df_z['z'] = df_z['z'].abs()
+
+ # append last z_smooth_n rows of zscores to history table in wide format
+ self.df_z_history = self.df_z_history.append(
+ df_z[['z']].reset_index().pivot_table(values='z', columns='index'), sort=True
+ ).tail(self.z_smooth_n)
+
+ # get average zscore for last z_smooth_n for each metric
+ df_z_smooth = self.df_z_history.melt(value_name='z').groupby('index')['z'].mean().to_frame()
+ df_z_smooth['3stddev'] = np.where(abs(df_z_smooth['z']) > 300, 1, 0)
+ data_z = df_z_smooth['z'].add_suffix('_z').to_dict()
+
+ # aggregate to chart level if specified
+ if self.mode == 'per_chart':
+ df_z_smooth['chart'] = ['.'.join(x[0:2]) + '_z' for x in df_z_smooth.index.str.split('.').to_list()]
+ if self.per_chart_agg == 'absmax':
+ data_z = \
+ list(df_z_smooth.groupby('chart').agg({'z': [lambda x: max(x, key=abs)]})['z'].to_dict().values())[0]
+ else:
+ data_z = list(df_z_smooth.groupby('chart').agg({'z': [self.per_chart_agg]})['z'].to_dict().values())[0]
+
+ data_3stddev = {}
+ for k in data_z:
+ data_3stddev[k.replace('_z', '')] = 1 if abs(data_z[k]) > 300 else 0
+
+ return data_z, data_3stddev
+
+ def get_data(self):
+
+ if self.runs_counter <= self.burn_in or self.runs_counter % self.train_every_n == 0:
+ self.train_model()
+
+ data_z, data_3stddev = self.create_data(
+ get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.').transpose())
+ data = {**data_z, **data_3stddev}
+
+ self.validate_charts('z', data_z, divisor=100)
+ self.validate_charts('3stddev', data_3stddev)
+
+ return data
diff --git a/collectors/python.d.plugin/zscores/zscores.conf b/collectors/python.d.plugin/zscores/zscores.conf
new file mode 100644
index 00000000..07d62ebe
--- /dev/null
+++ b/collectors/python.d.plugin/zscores/zscores.conf
@@ -0,0 +1,108 @@
+# netdata python.d.plugin configuration for zscores
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, zscores also supports the following
+# (all demonstrated in the 'local' job below):
+#
+#  host, charts_regex, charts_to_exclude, train_secs, offset_secs,
+#  train_every_n, z_smooth_n, z_clip, z_abs, burn_in, mode, per_chart_agg
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+ name: 'local'
+
+ # what host to pull data from
+ host: '127.0.0.1:19999'
+
+ # what charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
+ charts_regex: 'system\..*'
+
+ # Charts to exclude, useful if you would like to exclude some specific charts.
+ # Note: should be a ',' separated string like 'chart.name,chart.name'.
+ charts_to_exclude: 'system.uptime'
+
+ # length of time to base calculations off for mean and stddev
+ train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore
+
+ # offset preceding latest data to ignore when calculating mean and stddev
+ offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev
+
+ # recalculate the mean and stddev every n steps of the collector
+ train_every_n: 900 # recalculate mean and stddev every 15 minutes
+
+ # smooth the z score by averaging it over last n values
+ z_smooth_n: 15 # take a rolling average of the last 15 zscore values to reduce sensitivity to temporary 'spikes'
+
+ # cap absolute value of zscore (before smoothing) for better stability
+ z_clip: 10 # cap each zscore at 10 so as to avoid really large individual zscores swamping any rolling average
+
+ # set z_abs: 'true' to make all zscores be absolute values only.
+ z_abs: 'true'
+
+ # burn in period in which to initially calculate mean and stddev on every step
+ burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return
+
+ # mode can be to get a zscore 'per_dim' or 'per_chart'
+ mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step
+
+ # per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'
+ per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average.